# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
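        # canonical fixture: a two-level MultiIndex built from
        # ['foo', 'bar', 'baz', 'qux'] and ['one', 'two'],
        # named 'first' / 'second'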
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
        self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
                                             labels=[major_labels,
                                                     minor_labels],
                                             names=self.index_names,
                                             verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
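        # labels should be stored in the smallest integer dtype that
        # fits the level size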
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
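        # .where is not implemented for MultiIndex and should raise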
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
        # set names for specific level (GH 7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
        # label changing for a level with a larger number of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
        # scalar data shouldn't error; it should demand list-like input
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
        # scalar data shouldn't error; it should demand list-like input
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
        # scalar data shouldn't error; it should demand list-like input
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
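        # mutating levels/labels in place must invalidate the cached
        # _tuples / .values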
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
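        # with copy=True, mutating the input arrays afterwards must not
        # leak into the index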
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
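        # astype(object) should round-trip levels, labels and names;
        # non-object dtypes are rejected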
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
        # Levels should be (at least shallow-)copied
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
        # Labels compare equal regardless of how they were copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
        # Names compare equal regardless of how they were copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
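        # integer level names take precedence over positions, so the
        # name 1 resolves to level 0 here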
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
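        # rebuild each level's values via take(), then round-trip the
        # index through from_arrays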
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
        etalon = construct_1d_object_array_from_listlike(
            [(1, pd.Timestamp('2000-01-01')), (1, pd.Timestamp('2000-01-02')),
             (2, pd.Timestamp('2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
        dti = date_range('2011-01-01', freq='M', periods=3)
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
        # 0.7.3 -> 0.8.0 pickle format change
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
        index = MultiIndex.from_product(
            [[1, 2], ['a', 'b'],
             date_range('20130101', periods=3, tz='US/Eastern')],
            names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
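        # a value from the first level is 'in' the index, but is not a
        # key in the tuple engine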
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
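        # exact tuple lookups return positions/slices; missing keys
        # raise KeyError and method='nearest' is unsupported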
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
        index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)),
                                   Index(lrange(4))],
                           labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
                                   np.array([0, 1, 0, 0, 0, 1, 0, 1]),
                                   np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
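        # a duplicated first-level key resolves to a slice; the raw
        # engine lookup still raises KeyError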
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
        index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)),
                                   Index(lrange(4))],
                           labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
                                   np.array([0, 1, 0, 0, 0, 1, 0, 1]),
                                   np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
        index = MultiIndex(levels=[[2000], lrange(4)],
                           labels=[np.array([0, 0, 0, 0]),
                                   np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
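        # slice_locs on the lexsorted stacked index should match
        # positional slicing of the original frame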
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
        index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)),
                                   Index(lrange(4))],
                           labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
                                   np.array([0, 1, 0, 0, 0, 1, 0, 1]),
                                   np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
        # the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
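        # 'pad'/'ffill' and 'backfill'/'bfill' must agree, and plain
        # arrays of tuples are accepted as targets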
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
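        # to_hierarchical repeats each entry n_repeat times; with
        # n_shuffle > 1 the repeated blocks are interleaved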
        index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'),
                                        (2, 'one'), (2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
        index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)),
                                   Index(lrange(4))],
                           labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
                                   np.array([0, 1, 0, 0, 0, 1, 0, 1]),
                                   np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
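        # the union of two overlapping slices rebuilds the full sorted
        # index; union with self or an empty index returns self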
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# raise Exception called with non-MultiIndex
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
# FIXME data types changes to float because
# of intermediate nan insertion;
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def take_invalid_kwargs(self):
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
@pytest.mark.parametrize('other',
[Index(['three', 'one', 'two']),
Index(['one']),
Index(['one', 'three'])])
def test_join_level(self, other, join_type):
join_index, lidx, ridx = other.join(self.index, how=join_type,
level='second',
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=join_type)
assert join_index.levels[0].equals(self.index.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
self.index.join(other, how=join_type, level='second',
return_indexers=True)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(self):
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
assert isinstance(result, MultiIndex)
tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
def test_join_self(self, join_type):
res = self.index
joined = res.join(res, how=join_type)
assert res is joined
def test_join_multi(self):
# GH 10665
midx = pd.MultiIndex.from_product(
[np.arange(4), np.arange(4)], names=['a', 'b'])
idx = pd.Index([1, 2, 5], name='b')
# inner
jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
exp_idx = pd.MultiIndex.from_product(
[np.arange(4), [1, 2]], names=['a', 'b'])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,
1, -1], dtype=np.intp)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
assert isinstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
assert isinstance(result, MultiIndex)
assert indexer is None
self.check_level_names(result, self.index.names)
def test_reindex_level(self):
idx = Index(['one'])
target, indexer = self.index.reindex(idx, level='second')
target2, indexer2 = idx.reindex(self.index, level='second')
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
tm.assert_raises_regex(TypeError, "Fill method not supported",
self.index.reindex, self.index,
method='pad', level='second')
tm.assert_raises_regex(TypeError, "Fill method not supported",
idx.reindex, idx, method='bfill',
level='first')
def test_duplicates(self):
assert not self.index.has_duplicates
assert self.index.append(self.index).has_duplicates
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
assert index.has_duplicates
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z')
import argparse
import json
import logging
from enum import Enum
from typing import List, Optional
import pandas
from annofabapi.models import ProjectMemberRole, Task, TaskPhase, TaskStatus
import annofabcli
import annofabcli.common.cli
from annofabcli import AnnofabApiFacade
from annofabcli.common.cli import (
AbstractCommandLineInterface,
ArgumentParser,
build_annofabapi_resource_and_login,
get_json_from_args,
get_wait_options_from_args,
)
from annofabcli.common.dataclasses import WaitOptions
from annofabcli.common.download import DownloadingFile
from annofabcli.common.enums import FormatArgument
logger = logging.getLogger(__name__)
DEFAULT_WAIT_OPTIONS = WaitOptions(interval=60, max_tries=360)
DEFAULT_TASK_ID_DELIMITER = "_"
class TaskStatusForSummary(Enum):
"""
Task status for summary purposes (the information of interest is expressed as the status).
"""
ANNOTATION_NOT_STARTED = "annotation_not_started"
"""教師付未着手"""
INSPECTION_NOT_STARTED = "inspection_not_started"
"""検査未着手"""
ACCEPTANCE_NOT_STARTED = "acceptance_not_started"
"""受入未着手"""
WORKING = "working"
BREAK = "break"
ON_HOLD = "on_hold"
COMPLETE = "complete"
@staticmethod
def from_task(task: Task) -> "TaskStatusForSummary":
status = task["status"]
if status == TaskStatus.NOT_STARTED.value:
phase = task["phase"]
if phase == TaskPhase.ANNOTATION.value:
return TaskStatusForSummary.ANNOTATION_NOT_STARTED
elif phase == TaskPhase.INSPECTION.value:
return TaskStatusForSummary.INSPECTION_NOT_STARTED
elif phase == TaskPhase.ACCEPTANCE.value:
return TaskStatusForSummary.ACCEPTANCE_NOT_STARTED
else:
raise RuntimeError(f"phase={phase}が対象外です。")
else:
return TaskStatusForSummary(status)
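# A brief usage sketch (the task dict below is hypothetical; real Task objects come from the
# annofabapi client, and the enum values are assumed to be the lowercase status/phase names):
#   task = {"status": "not_started", "phase": "annotation"}
#   TaskStatusForSummary.from_task(task)  # -> TaskStatusForSummary.ANNOTATION_NOT_STARTED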
def add_info_to_task(task: Task) -> Task:
task["status_for_summary"] = TaskStatusForSummary.from_task(task).value
return task
def create_task_count_summary_df(task_list: List[Task]) -> pandas.DataFrame:
"""
タスク数の集計結果が格納されたDataFrameを取得する。
Args:
task_list:
Returns:
"""
def add_columns_if_not_exists(df: pandas.DataFrame, column: str):
if column not in df.columns:
df[column] = 0
df_task = pandas.DataFrame([add_info_to_task(t) for t in task_list])
df_summary = df_task.pivot_table(
values="task_id", index=["account_id"], columns=["status_for_summary"], aggfunc="count", fill_value=0
).reset_index()
for status in TaskStatusForSummary:
add_columns_if_not_exists(df_summary, status.value)
return df_summary
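# Illustrative call (a hypothetical minimal task dict; real tasks carry many more keys):
#   tasks = [{"task_id": "t1", "account_id": "u1", "status": "complete", "phase": "acceptance"}]
#   create_task_count_summary_df(tasks)
#   # -> one row per account_id, one count column per TaskStatusForSummary value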
class SummarizeTaskCountByUser(AbstractCommandLineInterface):
def create_user_df(self, project_id: str, account_id_list: List[str]) -> pandas.DataFrame:
user_list = []
for account_id in account_id_list:
user = self.facade.get_project_member_from_account_id(project_id=project_id, account_id=account_id)
if user is not None:
user_list.append(user)
return pandas.DataFrame(user_list, columns=["account_id", "user_id", "username", "biography"])
"""Implements the utilities to generate general multi-objective mixed-integer linear program instances
Referenced articles:
@article{mavrotas2005multi,
title={Multi-criteria branch and bound: A vector maximization algorithm for mixed 0-1 multiple objective linear programming},
author={<NAME> and <NAME>},
journal={Applied mathematics and computation},
volume={171},
number={1},
pages={53--71},
year={2005},
publisher={Elsevier}
}
@article{boland2015criterion,
title={A criterion space search algorithm for biobjective mixed integer programming: The triangle splitting method},
author={<NAME> and <NAME> and <NAME>},
journal={INFORMS Journal on Computing},
volume={27},
number={4},
pages={597--618},
year={2015},
publisher={INFORMS}
}
@article{kirlik2014new,
title={A new algorithm for generating all nondominated solutions of multiobjective discrete optimization problems},
author={<NAME> and <NAME>},
journal={European Journal of Operational Research},
volume={232},
number={3},
pages={479--488},
year={2014},
publisher={Elsevier}
}
"""
from abc import ABCMeta, abstractmethod
from gurobipy import GRB, LinExpr, Model
import numpy as np
import os
import pandas as pd
class MomilpInstanceParameterSet:
"""Implements MOMILP instance parameter set"""
def __init__(
self,
constraint_coeff_range=(-1, 20),
continuous_var_obj_coeff_range=(-10, 10),
# if 'True', all the integer variables have zero coefficient in the discrete objectives
dummy_discrete_obj=True,
integer_var_obj_coeff_range=(-200, 200),
# num of binary variables out of the num of integer vars
num_binary_vars=10,
num_constraints=20,
num_continuous_vars=10,
# starting from the objective function at the first index
num_discrete_objs=1,
num_integer_vars=10,
num_objs=3,
obj_sense="max",
rhs_range=(50, 100)):
self.constraint_coeff_range = constraint_coeff_range
self.continuous_var_obj_coeff_range = continuous_var_obj_coeff_range
self.dummy_discrete_obj = dummy_discrete_obj
self.integer_var_obj_coeff_range = integer_var_obj_coeff_range
self.num_binary_vars = num_binary_vars
self.num_constraints = num_constraints
self.num_continuous_vars = num_continuous_vars
self.num_discrete_objs = num_discrete_objs
self.num_integer_vars = num_integer_vars
self.num_objs = num_objs
self.obj_sense = obj_sense
self.rhs_range = rhs_range
def to_dict(self):
"""Returns the dictionary representation of the parameter set"""
return self.__dict__
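# Minimal usage sketch: the defaults can be overridden per instance and exported as a dict,
# e.g. MomilpInstanceParameterSet(num_binary_vars=10, num_objs=3).to_dict()["num_objs"] == 3.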
class MomilpInstance(metaclass=ABCMeta):
"""Implements an abstract MOMILP instance class"""
@abstractmethod
def write(self, path):
"""Writes the model"""
class MomilpInstanceData:
"""Implements a MOMILP instance data"""
def __init__(
self, param_2_value, constraint_coeff_df=None, continuous_var_obj_coeff_df=None,
integer_var_obj_coeff_df=None, rhs=None):
self._constraint_coeff_df = constraint_coeff_df
self._continuous_var_obj_coeff_df = continuous_var_obj_coeff_df
self._integer_var_obj_coeff_df = integer_var_obj_coeff_df
self._param_2_value = param_2_value
self._rhs = rhs
def constraint_coeff_df(self):
"""Returns the constraint coefficient data frame
NOTE: An (m by n) matrix where rows are constraints and columns are variables"""
return self._constraint_coeff_df
def continuous_var_obj_coeff_df(self):
"""Returns the objective functions coefficients data frame for the continuous variables
NOTE: An (m by n) matrix where rows are variables and columns are objective functions"""
return self._continuous_var_obj_coeff_df
def integer_var_obj_coeff_df(self):
"""Returns the objective functions coefficients data frame for the integer variables
NOTE: An (m by n) matrix where rows are variables and columns are objective functions"""
return self._integer_var_obj_coeff_df
def rhs(self):
"""Returns the right-hand-side values of the constraints
NOTE: A series of length m"""
return self._rhs
class KnapsackFileInstanceData(MomilpInstanceData):
"""Implements a Knapsack problem instance data retrived from a file
NOTE: Based on the data input schema defined in Kirlik and Sayin. (2014):
http://home.ku.edu.tr/~moolibrary/
A '.dat' file describing a multi-objective 0-1 knapsack problem
Line 1: Number of objective functions, p
Line 2: Number of objects, n
Line 3: Capacity of the knapsack, W
Line 5: Profits of the objects in each objective function, V
Line 6: Weights of the objects, w
"""
_ESCAPED_CHARACTERS = ["[", "]"]
_LINE_DELIMITER = ", "
_NEW_LINE_SEPARATOR = "\n"
def __init__(self, file_name, param_2_value):
super(KnapsackFileInstanceData, self).__init__(param_2_value)
self._file_name = file_name
self._create()
def _create(self):
"""Creates the instance data"""
lines = []
with open(self._file_name, "r") as f:
lines = f.readlines()
# read the number of objectives
num_objectives = int(self._process_lines(lines).iloc[0,0])
assert num_objectives == self._param_2_value["num_objs"], \
"the number of objectives in the data file is not equal to the configuration parameter value, " \
"'%d' != '%d'" % (num_objectives, self._param_2_value["num_objs"])
# read the number of objects
num_continuous_vars = self._param_2_value["num_continuous_vars"]
assert num_continuous_vars == 0, "there should not be any continuous variables"
num_binary_vars = self._param_2_value["num_binary_vars"]
num_objects = int(self._process_lines(lines).iloc[0,0])
assert num_objects == num_binary_vars, \
"the number of objects in the data file is not equal to the number of binary variables in the " \
"configuration, '%d' != '%d'" % (num_objects, num_continuous_vars + num_binary_vars)
# read the knapsack capacities
self._rhs = self._process_lines(lines).iloc[0, :]
num_constraints = len(self._rhs)
assert num_constraints == self._param_2_value["num_constraints"], \
"the number of constraints in the data file is not equal to the configuration parameter value, " \
"'%d' != '%d'" % (num_constraints, self._param_2_value["num_constraints"])
# read the objective function coefficients
self._continuous_var_obj_coeff_df = pd.DataFrame()
self._integer_var_obj_coeff_df = self._process_lines(lines, to_index=num_objectives).T
# read the constraint coefficients
self._constraint_coeff_df = self._process_lines(lines, to_index=num_constraints)
def _process_lines(self, lines, from_index=0, to_index=1):
"""Processes the lines between the indices, removes the processed lines, and returns the data frame for the
processed data"""
rows = []
for line in lines[from_index:to_index]:
for char in KnapsackFileInstanceData._ESCAPED_CHARACTERS:
line = line.replace(char, "")
line = line.split(KnapsackFileInstanceData._NEW_LINE_SEPARATOR)[0]
values = line.split(KnapsackFileInstanceData._LINE_DELIMITER)
if values[-1][-1] == ",":
values[-1] = values[-1][:-1]
if not values[-1]:
values = values[:-1]
rows.append(values)
del lines[from_index:to_index]
df = pd.DataFrame(rows, dtype='float')
return df
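# For example (illustrative): _process_lines(["[3, 5, 7]\n", ...]) strips the brackets,
# splits on ", ", consumes the first line from `lines` in place, and returns the one-row
# float DataFrame [[3.0, 5.0, 7.0]].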
class MomilpFileInstanceData(MomilpInstanceData):
"""Implements a MOMILP instance data retrived from a file
NOTE: Based on the data input schema defined in Boland et al. (2015):
A '.txt' file describing a bi-objective problem
Line 1: Number of constraints, m
Line 2: Number of continuous variables, n_c
Line 3: Number of binary variables, n_b
Line 4: Array of coefficients for the first objective and the continuous variables, c^{1}
Line 5: Array of coefficients for the first objective and the binary variables, f^{1}
Line 6: Array of coefficients for the second objective and the continuous variables, c^{2}
Line 7: Array of coefficients for the second objective and the binary variables, f^{2}
Next 'n_c' lines: Array of constraint matrix coefficients for the continuous variables, a_{i,j}
Next line: Array of constraint matrix coefficients for the binary variables, a^{'}_{j}
Next line: Array of constraint right-hand-side values, b_j
The instance is converted to a three-obj problem by creating an additional objective with all zero coefficients.
"""
_INTEGER_VARIABLE_SUM_CONTRAINT_RHS_MULTIPLIER = 1/3
_LINE_DELIMITER = " "
_NEW_LINE_SEPARATOR = "\n"
def __init__(self, file_name, param_2_value):
super(MomilpFileInstanceData, self).__init__(param_2_value)
self._file_name = file_name
self._create()
def _create(self):
"""Creates the instance data"""
lines = []
with open(self._file_name, "r") as f:
lines = f.readlines()
num_constraints = int(self._process_lines(lines).iloc[0,0])
assert num_constraints == self._param_2_value["num_constraints"], \
"the number of constraints in the data file is not equal to the configuration parameter value, " \
"'%d' != '%d'" % (num_constraints, self._param_2_value["num_constraints"])
num_continuous_vars = int(self._process_lines(lines).iloc[0,0])
assert num_continuous_vars == self._param_2_value["num_continuous_vars"], \
"the number of continuous vars in the data file is not equal to the configuration parameter value, " \
"'%d' != '%d'" % (num_continuous_vars, self._param_2_value["num_continuous_vars"])
num_binary_vars = int(self._process_lines(lines).iloc[0,0])
assert num_binary_vars == self._param_2_value["num_binary_vars"], \
"the number of binary vars in the data file is not equal to the configuration parameter value, " \
"'%d' != '%d'" % (num_binary_vars, self._param_2_value["num_binary_vars"])
# since we solve the BOMILP as TOMILP in the momilp solver, and the default discrete obj index is zero, we
# create zero arrays as the coefficient vectors for the first objective
self._continuous_var_obj_coeff_df = pd.DataFrame(np.zeros(shape=(1, num_continuous_vars)))
self._integer_var_obj_coeff_df = pd.DataFrame(np.zeros(shape=(1, num_binary_vars)))
self._continuous_var_obj_coeff_df = self._continuous_var_obj_coeff_df.append(self._process_lines(lines))
self._integer_var_obj_coeff_df = self._integer_var_obj_coeff_df.append(self._process_lines(lines))
self._continuous_var_obj_coeff_df = self._continuous_var_obj_coeff_df.append(
self._process_lines(lines)).reset_index(drop=True).T
self._integer_var_obj_coeff_df = self._integer_var_obj_coeff_df.append(
self._process_lines(lines)).reset_index(drop=True).T
continuous_var_columns = [i for i in range(num_continuous_vars)]
binary_var_columns = [len(continuous_var_columns) + i for i in range(num_binary_vars)]
continuous_var_constraint_df = self._process_lines(lines, to_index=num_continuous_vars).T
continuous_var_constraint_df = continuous_var_constraint_df.append(
pd.DataFrame(np.zeros(shape=(1, num_continuous_vars)))).reset_index(drop=True)
continuous_var_constraint_df.columns = continuous_var_columns
binary_var_constraint_df = pd.DataFrame(np.diag(self._process_lines(lines).iloc[0,:])).append(
pd.DataFrame(np.zeros(shape=(num_constraints - num_binary_vars - 1, num_binary_vars)))).append(
pd.DataFrame(np.ones(shape=(1, num_binary_vars)))).reset_index(drop=True)
binary_var_constraint_df.columns = binary_var_columns
self._constraint_coeff_df = pd.concat([continuous_var_constraint_df, binary_var_constraint_df], axis=1)
binary_var_sum_rhs = num_binary_vars * MomilpFileInstanceData._INTEGER_VARIABLE_SUM_CONTRAINT_RHS_MULTIPLIER
self._rhs = self._process_lines(lines).iloc[0, :].append(pd.Series(binary_var_sum_rhs)).reset_index(drop=True)
def _process_lines(self, lines, from_index=0, to_index=1):
"""Processes the lines between the indices, removes the processed lines, and returns the data frame for the
processed data"""
rows = []
for line in lines[from_index:to_index]:
line = line.split(MomilpFileInstanceData._NEW_LINE_SEPARATOR)[0]
values = line.split(MomilpFileInstanceData._LINE_DELIMITER)
if not values[-1]:
values = values[:-1]
rows.append(values)
del lines[from_index:to_index]
df = pd.DataFrame(rows, dtype='float')
return df
def constraint_coeff_df(self):
"""Returns the constraint coefficient data frame
NOTE: An (m by n) matrix where rows are constraints and columns are variables"""
return self._constraint_coeff_df
def continuous_var_obj_coeff_df(self):
"""Returns the objective functions coefficients data frame for the continuous variables
NOTE: An (m by n) matrix where rows are variables and columns are objective functions"""
return self._continuous_var_obj_coeff_df
def integer_var_obj_coeff_df(self):
"""Returns the objective functions coefficients data frame for the integer variables
NOTE: An (m by n) matrix where rows are variables and columns are objective functions"""
return self._integer_var_obj_coeff_df
def rhs(self):
"""Returns the right-hand-side values of the constraints
NOTE: A series of length m"""
return self._rhs
class MomilpRandomInstanceData(MomilpInstanceData):
"""Implements a MOMILP random instance data
NOTE: Based on the data generation schema defined in Mavrotas and Diakoulaki (2005) and Boland et al. (2015)"""
_INTEGER_VARIABLE_SUM_CONTRAINT_RHS_MULTIPLIER = 1/3
def __init__(self, param_2_value, np_rand_num_generator_seed=0):
np.random.seed(np_rand_num_generator_seed)
super(MomilpRandomInstanceData, self).__init__(param_2_value)
self._create()
def _create(self):
"""Creates the data"""
self._create_constraint_coeff_df()
self._create_continuous_var_obj_coeff_df()
self._create_integer_var_obj_coeff_df()
self._create_rhs()
def _create_constraint_coeff_df(self):
"""Create the data frame of constraint coefficients"""
num_constraints = self._param_2_value["num_constraints"]
num_continuous_vars = self._param_2_value["num_continuous_vars"]
num_integer_vars = self._param_2_value["num_integer_vars"]
(low, high) = self._param_2_value["constraint_coeff_range"]
continuous_var_columns = [i for i in range(num_continuous_vars)]
integer_var_columns = [len(continuous_var_columns) + i for i in range(num_integer_vars)]
continuous_var_constraint_df = pd.DataFrame(
np.random.random_integers(low, high, size=(num_constraints - 1, num_continuous_vars))).append(
pd.DataFrame(np.zeros(shape=(1, num_continuous_vars)))).reset_index(drop=True)
continuous_var_constraint_df.columns = continuous_var_columns
integer_var_constraint_df = pd.DataFrame(
np.diag(np.random.random_integers(low, high=high, size=num_integer_vars))).append(
pd.DataFrame(np.zeros(shape=(num_constraints - num_integer_vars - 1, num_integer_vars)))).append(
pd.DataFrame(np.ones(shape=(1, num_integer_vars)))).reset_index(drop=True)
integer_var_constraint_df.columns = integer_var_columns
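# Resulting structure (descriptive note): the first num_integer_vars rows pair random continuous
# coefficients with a single integer variable each (diagonal block), the following rows involve
# only the continuous variables, and the last row is the all-ones constraint over the integers.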
self._constraint_coeff_df = pd.concat([continuous_var_constraint_df, integer_var_constraint_df], axis=1)
import pandas as pd
import numpy as np
import scipy.stats  # scipy.stats.multivariate_normal is used below
import os
from sklearn.datasets import load_boston
from typing import Tuple, Optional, Callable
# import utils
from pred_diff.datasets import utils
base_dir = os.path.dirname(__file__)
class SyntheticDataset:
def __init__(self, function: Callable[[np.ndarray], np.ndarray], mean: Optional[np.ndarray] = None,
cov: Optional[np.ndarray] = None, noise: float = 0.):
self.function = function
self.noise = noise
self.mean = np.array([0, 0, 0, 0]) if mean is None else np.array(mean)
self.cov = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]) if cov is None else np.array(cov)
assert np.alltrue(np.linalg.eigvals(self.cov) > 0), f'covariance matrix not valid: \n{self.cov}'
assert self.cov.shape == (4, 4)
self.mvn = scipy.stats.multivariate_normal(mean=self.mean, cov=self.cov) # set-up multivariate normal
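# Usage sketch (illustrative; the target function below is made up):
#   ds = SyntheticDataset(function=lambda x: x[:, 0] + x[:, 1] ** 2)
#   ds.load_pd(100)  # draws 100 samples from the 4-d Gaussian and evaluates the target on them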
def load_pd(self, n_samples: int = None) -> Tuple[pd.DataFrame, pd.Series]:
n_samples = 4000 if n_samples is None else n_samples
x = self.mvn.rvs(n_samples) # draw random samples
y = self.function(x)
# x += self.noise * np.random.randn(x.size).reshape(x.shape)
return pd.DataFrame(x, columns=['0', '1', '2', '3'])
# coding=utf-8
"""
deep_model_1: uses the raw features, one-hot encodes the categorical features, and adds a small number of cross features.
@author: yuhaitao
"""
import pandas as pd
import os
import numpy as np
import gc
import pickle
import datetime
import sys
import multiprocessing
import json
import tensorflow as tf
import shutil  # used to clear directories
import psutil  # used to check memory usage
from tqdm import tqdm
from sklearn.model_selection import KFold
from data_loader import myDataLoader, var_norm, min_max_norm
from utils import get_emb_id, cross_feature, norm_and_smape, SMAPE
class deep_model_not_emb(tf.keras.Model):
"""
Deep model (this variant does not use embedding layers).
"""
def __init__(self):
"""初始化layers"""
super().__init__()
self.dense_1 = tf.keras.layers.Dense(
units=1024 * 4, name='deep_1', activation=tf.keras.activations.relu,
kernel_regularizer=tf.keras.regularizers.l2(1.0))
self.dense_add = tf.keras.layers.Dense(
units=512 * 4, name='deep_add', activation=tf.keras.activations.relu,
kernel_regularizer=tf.keras.regularizers.l2(1.0))
self.dense_2 = tf.keras.layers.Dense(
units=256 * 4, name='deep_2', activation=tf.keras.activations.relu,
kernel_regularizer=tf.keras.regularizers.l2(1.0))
self.dense_add_2 = tf.keras.layers.Dense(
units=128 * 4, name='deep_add_2', activation=tf.keras.activations.relu,
kernel_regularizer=tf.keras.regularizers.l2(1.0))
self.dense_3 = tf.keras.layers.Dense(
units=1, name='deep_3', activation=None)
def call(self, inputs, training=False):
"""模型调用"""
x, id_, y_t = inputs
deep_part = self.dense_1(x)
deep_part = self.dense_add(deep_part)
deep_part = self.dense_2(deep_part)
deep_part = self.dense_add_2(deep_part)
deep_part = self.dense_3(deep_part)
out = deep_part
return out, id_, y_t
def make_tfrecords_dataset(tfrecords_file, batch_size, input_size, label_id, is_shuffle=True):
"""
Builds the tf.data dataset from a TFRecord file.
"""
feature_description = {  # feature schema: tells the decoder the type of each feature
'inputs': tf.io.FixedLenFeature([input_size], tf.float32),  # note the shape
'labels': tf.io.FixedLenFeature([6], tf.float32),
'id_': tf.io.FixedLenFeature([], tf.string)
}
i = int(label_id[-1]) - 1
def _parse_example(example_string):  # decode each serialized tf.train.Example in the TFRecord file
feature_dict = tf.io.parse_single_example(
example_string, feature_description)
return (feature_dict['inputs'], [feature_dict['id_']], [feature_dict['labels'][i]]), [feature_dict['labels'][i]]
dataset = tf.data.TFRecordDataset(tfrecords_file)
dataset = dataset.map(
_parse_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size=batch_size, drop_remainder=False)
if is_shuffle:
dataset = dataset.shuffle(buffer_size=15000).repeat()
dataset = dataset.prefetch(buffer_size=batch_size * 8)
return dataset
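# Illustrative usage (the tfrecords path below is hypothetical):
#   ds = make_tfrecords_dataset("standard_train_13853_fold_0.tfrecords", batch_size=64,
#                               input_size=13853, label_id="p1", is_shuffle=True)
#   (x, id_, y_true), y = next(iter(ds))  # matches the tuple built in _parse_example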
def wnd_train(tfrecords_dir, wnd_params, label_id):
"""
Trains the wide & deep model.
"""
n_fold = wnd_params['n_fold']
batch_size = wnd_params['batch_size']
with open('./feature_info.json', 'r') as f:
feature_infos = json.load(f)
# paths for saving the model and logs
model_path = wnd_params['model_path']
log_path = wnd_params['log_path']
if not os.path.exists(model_path):
os.mkdir(model_path)
if not os.path.exists(log_path):
os.mkdir(log_path)
log_file = open(os.path.join(log_path, 'print.log'), 'w')
stdout_backup = sys.stdout
sys.stdout = log_file
smape_score = np.zeros(n_fold)  # evaluation scores
# train over the k folds (the tfrecords have already been split per fold)
for fold_idx in range(n_fold):
time_stamp = datetime.datetime.now()
print('*' * 120)
print(f"Fold [{fold_idx}]: " +
time_stamp.strftime('%Y.%m.%d-%H:%M:%S'))
# model and log paths for this fold
model_path_f = os.path.join(model_path, f'fold_{fold_idx}')
log_path_f = os.path.join(log_path, f'fold_{fold_idx}')
if not os.path.exists(model_path_f):
os.mkdir(model_path_f)
else:
shutil.rmtree(model_path_f)
os.mkdir(model_path_f)
if not os.path.exists(log_path_f):
os.mkdir(log_path_f)
else:
shutil.rmtree(log_path_f)
os.mkdir(log_path_f)
# load the datasets
train_set = make_tfrecords_dataset(os.path.join(
tfrecords_dir, f'standard_train_13853_fold_{fold_idx}.tfrecords'), batch_size=batch_size, input_size=13853, label_id=label_id, is_shuffle=True)
val_set = make_tfrecords_dataset(os.path.join(
tfrecords_dir, f'standard_val_13853_fold_{fold_idx}.tfrecords'), batch_size=batch_size * 16, input_size=13853, label_id=label_id, is_shuffle=False)
train_iter = iter(train_set)  # build an iterator over the training set
# create the model
model = deep_model_not_emb()
deep_optimizer = tf.keras.optimizers.Adam(
learning_rate=wnd_params['learning_rate'])
# checkpoint for saving the model parameters
checkpoint = tf.train.Checkpoint(mymodel=model)
manager = tf.train.CheckpointManager(
checkpoint, directory=model_path_f, max_to_keep=1)
# record TensorBoard summaries
summary_writer = tf.summary.create_file_writer(log_path_f)  # instantiate the writer
@tf.function
def train_one_step(x, y):
"""训练一次, 静态图模式"""
with tf.GradientTape() as tape:
y_pred, id_, y_t = model(x, training=True)
loss = tf.reduce_mean(
tf.keras.losses.MAE(y_true=y, y_pred=y_pred))
deep_grads = tape.gradient(loss, model.trainable_weights)
deep_optimizer.apply_gradients(
grads_and_vars=zip(deep_grads, model.trainable_weights))
return y_pred, loss
# training-time metrics
loss_record, smape_record, min_val_smape, early_stop_rounds = 0., 0., 200.0, 0
s_time = datetime.datetime.now()
# training loop
for i in range(wnd_params['iterations']):
x, y = next(train_iter)
y_pred, loss = train_one_step(x, y)
loss_record += loss
smape_record += norm_and_smape(y, y_pred, label_id,
feature_infos, norm_type='var', mode="norm_all")
# eval
if i % 1000 == 0 and i != 0:
val_out = model.predict(x=val_set)
val_preds = val_out[0].squeeze()
val_true = val_out[2].squeeze()
# compute the validation score
val_smape = norm_and_smape(
val_true, val_preds, label_id, feature_infos, norm_type='var', mode='norm_all')
e_time = datetime.datetime.now()
mem_use = psutil.Process(
os.getpid()).memory_info().rss / (1024**3)
print(f'steps: {i}, train_loss: {loss_record / 1000}, train SMAPE: {(smape_record / 1000):.6f}, val SMAPE: {val_smape:.6f}, time_cost: {(e_time-s_time)}, memory_use: {mem_use:.4f}')
with summary_writer.as_default():
tf.summary.scalar("mean_train_loss",
(loss_record / 1000), step=i)
tf.summary.scalar(
"SMAPE/train", (smape_record / 1000), step=i)
tf.summary.scalar("SMAPE/val", val_smape, step=i)
tf.summary.scalar("memory_use", mem_use, step=i)
# model saving and early stopping
if val_smape < min_val_smape:
min_val_smape = val_smape
manager.save(checkpoint_number=i)
smape_score[fold_idx] = val_smape
early_stop_rounds = 0
else:
early_stop_rounds += 1
if early_stop_rounds >= 50:
break
loss_record, smape_record = 0.0, 0.0
s_time = datetime.datetime.now()
tf.keras.backend.clear_session()  # free memory
del model, deep_optimizer, train_set, val_set, checkpoint, summary_writer, manager
gc.collect()
# overall smape
print('*' * 120)
print(f'Mean smape in each fold of {label_id}: {np.mean(smape_score)}')
# end of stdout redirection
log_file.close()
sys.stdout = stdout_backup
def predict_one_fold(fold_idx, label_id, model_dir, test_set, tfrecords_dir):
# load the model
val_set = make_tfrecords_dataset(os.path.join(
tfrecords_dir, f'standard_val_13853_fold_{fold_idx}.tfrecords'), batch_size=1024, input_size=13853, label_id=label_id, is_shuffle=False)
model = deep_model_not_emb()
checkpoint = tf.train.Checkpoint(mymodel=model)
model_dir = os.path.join(model_dir, f'fold_{fold_idx}')
checkpoint.restore(tf.train.latest_checkpoint(model_dir))
# predict on the validation set
val_out = model.predict(x=val_set)
val_preds = val_out[0].squeeze()
val_true = val_out[2].squeeze()
# predict on the test set
test_out = model.predict(x=test_set)
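# the per-fold test predictions are divided by 5 below so that summing them across the
# five folds in wnd_predict yields their average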
test_preds = test_out[0].squeeze() / 5
test_ids = test_out[1].squeeze()
return val_preds, val_true, test_preds, test_ids
def wnd_predict(tfrecords_dir, label_id, model_dir):
"""
Loads the selected models, outputs their predictions, and saves them to the result file.
Args:
tfrecords_dir: directory containing the tfrecords
model_dir: path to the model directory
"""
with open('./feature_info.json', 'r') as f:
feature_infos = json.load(f)
test_preds, test_ids = None, None
local_smape = np.zeros(5)
test_set = make_tfrecords_dataset(os.path.join(
tfrecords_dir, f'new_test_13853.tfrecords'), batch_size=1024, input_size=13853, label_id=label_id, is_shuffle=False)
# each fold predicts on its own validation set
print(f'Deep Model Predicting for {label_id} ......')
for fold_idx in range(5):
val_preds, val_true, one_test, one_test_id = predict_one_fold(fold_idx, label_id, model_dir, test_set, tfrecords_dir)
if fold_idx == 0:
test_preds = one_test
test_ids = one_test_id
print(f'Test data num: {test_preds.shape}')
else:
test_preds += one_test
# compute the local score for this fold
local_smape[fold_idx] = norm_and_smape(val_true, val_preds, label_id, feature_infos, norm_type='var', mode='norm_all')
print(f'SMAPE score of fold_{fold_idx}: {local_smape[fold_idx]:.6f}')
tf.keras.backend.clear_session()  # free memory
gc.collect()
print(f'Local SMAPE score of {label_id} is {np.mean(local_smape):.6f}')
return test_preds, test_ids, np.mean(local_smape)
def wnd_main(mode=None, label_str=''):
"""
Reads the data, sets the parameters, and dispatches training or prediction.
Args:
mode: train / predict
label_str: string that selects which target to train
"""
if mode == 'train':
pass
# dynamic GPU memory allocation
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
tf.config.experimental.set_visible_devices(devices=gpus[int(label_str[-1])%2], device_type='GPU')
tf.config.experimental.set_memory_growth(device=gpus[int(label_str[-1])%2], enable=True)
# the multi-process variant is not used here, so only one target is trained at a time
model_dir = f'./models/baseline/{label_str}'
log_dir = f'./logs/baseline/{label_str}'
tfrecords_dir = './data/tfrecords/standard'
if not os.path.exists(model_dir):
os.mkdir(model_dir)
if not os.path.exists(log_dir):
os.mkdir(log_dir)
# define wnd_params
wnd_params = {
'n_fold': 5,
'iterations': 350000,
'batch_size': 64,
'learning_rate': 0.0001,
'model_path': os.path.join(model_dir, 'deep_model_1'),
'log_path': os.path.join(log_dir, 'deep_model_1'),
}
print(f"Training {label_str} ...")
wnd_train(tfrecords_dir, wnd_params, label_id=label_str)
elif mode == 'predict':
"""预测"""
with open('./feature_info.json', 'r') as f:
feature_infos = json.load(f)
tfrecords_dir = './data/tfrecords/standard'
# dynamic GPU memory allocation
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
tf.config.experimental.set_visible_devices(devices=gpus[0], device_type='GPU')
tf.config.experimental.set_memory_growth(device=gpus[0], enable=True)
label_list = ['p1', 'p2', 'p3', 'p4', 'p5', 'p6']
model_dic = {
'p1': 'deep_model_1',
'p2': 'deep_model_1',
'p3': 'deep_model_1',
'p4': 'deep_model_1',
'p5': 'deep_model_1',
'p6': 'deep_model_1',
}
test_ids = None
test_preds = None
mean_smape = np.zeros(6)
for i in range(len(label_list)):
model_dir = f'./models/baseline/{label_list[i]}/{model_dic[label_list[i]]}'
local_test, one_test_ids, local_smape = wnd_predict(tfrecords_dir, label_list[i], model_dir)
if i == 0:
test_ids = one_test_ids
test_preds = np.zeros((len(local_test), 6))
test_preds[:, i] = local_test * feature_infos[label_list[i]]['std'] + feature_infos[label_list[i]]['mean']
mean_smape[i] = local_smape
print('*' * 120)
print(f'Deep Model Mean local SMAPE score is {np.mean(mean_smape):.6f}')
# build the submission file
id_df = pd.DataFrame(test_ids.astype(str), columns=['id'])
pred_df = pd.DataFrame(test_preds, columns=label_list)
result = pd.concat([id_df, pred_df], axis=1)
import pandas
import os
from sklearn.preprocessing import MinMaxScaler
from DataPreprocessing import preprocess
import numpy
def Begin_Processing_Prediction_CSV(ModelOutputCSVPath):
modelOutputDF = pandas.read_csv(ModelOutputCSVPath)
predictionsOutputDF = modelOutputDF.iloc[-1: ]
finalPredictionsOutput = predictionsOutputDF['Prediction']
#allows a workaround so I can later split it up into a true list
predictions = []
for o in finalPredictionsOutput:
predictions.append(o)
return predictions
def Processing_Prediction_To_Array(PredictionProcessedOnce):
#Puts all the prediction values in a list
#[-1] pulls out the last string and [2:-2] removes the surrounding [[ ]]
#then it splits on spaces and places it into a list
predictionProssesedList = list(map(float, PredictionProcessedOnce[-1][2:-2].split()))
predictionProssesedArray = [[val] for val in predictionProssesedList]
return predictionProssesedArray
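#Illustrative example: a prediction string such as "[[0.12 0.34 0.56]]" becomes
#[[0.12], [0.34], [0.56]] - one single-element row per predicted value.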
def Unscale_Prediction_To_DF(PreddictionArray, Stock):
#pulls data in the same way as the stock prediction file
unscaledTrainData = preprocess(Stock, 'TrainData')
unscaledTestData = preprocess(Stock, 'TestData')
#Scale and then Unscale
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler.fit(unscaledTrainData)
scaledTest = scaler.transform(unscaledTestData)
scaledTest = numpy.delete(scaledTest, 0, 1)
scaledTest = numpy.insert(scaledTest, [0], PreddictionArray, axis = 1)
unscaledTest = scaler.inverse_transform(scaledTest)
unscaledTestDF = pandas.DataFrame(unscaledTest)
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.compat import product, range
import pandas as pd
from pandas import (
Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat,
date_range)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
class TestPivotTable(object):
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),
('A', 'd'), ('B', 'a'), ('B', 'b'),
('B', 'c'), ('B', 'd'), ('C', 'a'),
('C', 'b'), ('C', 'c'), ('C', 'd')],
names=['customer', 'product'])
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'],
dropna=True)
exp_index = pd.MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, 3, 4]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'C': range(0, 9)})
df['A'] = df['A'].astype(CDT(categories, ordered=False))
result = df.pivot_table(index='B', columns='A', values='C',
dropna=dropna)
expected_columns = Series(['a', 'b', 'c'], name='A')
expected_columns = expected_columns.astype(
CDT(categories, ordered=False))
expected_index = Series([1, 2, 3], name='B')
expected = DataFrame([[0, 3, 6],
[1, 4, 7],
[2, 5, 8]],
index=expected_index,
columns=expected_columns,)
if not dropna:
# add back the non observed to compare
expected = expected.reindex(
columns=Categorical(categories)).astype('float')
tm.assert_frame_equal(result, expected)
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
{'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
categories=['low', 'high'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3]},
index=pd.Index(
pd.Categorical.from_codes([0, 1],
categories=['low', 'high'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
# gh-21378
df = pd.DataFrame(
{'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],
categories=['low', 'high', 'left'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3, 0]},
index=pd.Index(
pd.Categorical.from_codes([0, 1, 2],
categories=['low', 'high', 'left'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'int64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64=2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'float64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns,values',
[('bool1', ['float1', 'float2']),
('bool1', ['float1', 'float2', 'bool1']),
('bool2', ['float1', 'float2', 'bool1'])])
def test_pivot_preserve_dtypes(self, columns, values):
# GH 7142 regression test
v = np.arange(5, dtype=np.float64)
df = DataFrame({'float1': v, 'float2': v + 2.0,
'bool1': v <= 2, 'bool2': v <= 3})
df_res = df.reset_index().pivot_table(
index='index', columns=columns, values=values)
result = dict(df_res.dtypes)
expected = {col: np.dtype('O') if col[0].startswith('b')
else np.dtype('float64') for col in df_res}
assert result == expected
def test_pivot_no_values(self):
# GH 14380
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
'2011-01-01', '2011-01-02'])
df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
index=idx)
res = df.pivot_table(index=df.index.month, columns=df.index.day)
exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
'dt': pd.date_range('2011-01-01', freq='D',
periods=5)},
index=idx)
res = df.pivot_table(index=df.index.month,
columns=pd.Grouper(key='dt', freq='M'))
exp_columns = pd.MultiIndex.from_tuples([('A',
pd.Timestamp('2011-01-31'))])
exp_columns.names = [None, 'dt']
exp = pd.DataFrame([3.25, 2.0],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
res = df.pivot_table(index=pd.Grouper(freq='A'),
columns=pd.Grouper(key='dt', freq='M'))
exp = pd.DataFrame([3],
index=pd.DatetimeIndex(['2011-12-31']),
columns=exp_columns)
tm.assert_frame_equal(res, exp)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
if method:
result = df.pivot('a', 'b', 'c')
else:
result = pd.pivot(df, 'a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),
'c': 100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
if method:
pv = df.pivot('a', 'b', 'c')
else:
pv = pd.pivot(df, 'a', 'b', 'c')
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row['a'], row['b']] == row['c']
if method:
result = df.pivot('b', 'a', 'c')
else:
result = pd.pivot(df, 'b', 'a', 'c')
tm.assert_frame_equal(result, pv.T)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=exp_col)
if method:
pv = df.pivot(index='dt1', columns='dt2')
else:
pv = pd.pivot(df, index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'],
name='dt2',
tz='Asia/Tokyo'))
if method:
pv = df.pivot(index='dt1', columns='dt2', values='data1')
else:
pv = pd.pivot(df, index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_periods(self, method):
df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D'),
pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D')],
'p2': [pd.Period('2013-01', 'M'),
pd.Period('2013-01', 'M'),
pd.Period('2013-02', 'M'),
pd.Period('2013-02', 'M')],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.PeriodIndex(['2013-01', '2013-02'] * 2,
name='p2', freq='M')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=exp_col)
if method:
pv = df.pivot(index='p1', columns='p2')
else:
pv = pd.pivot(df, index='p1', columns='p2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=pd.PeriodIndex(['2013-01', '2013-02'],
name='p2', freq='M'))
if method:
pv = df.pivot(index='p1', columns='p2', values='data1')
else:
pv = pd.pivot(df, index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('values', [
['baz', 'zoo'], np.array(['baz', 'zoo']),
pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='foo', columns='bar', values=values)
else:
result = pd.pivot(df, index='foo', columns='bar', values=values)
data = [[1, 2, 3, 'x', 'y', 'z'],
[4, 5, 6, 'q', 'w', 't']]
index = Index(data=['one', 'two'], name='foo')
columns = MultiIndex(levels=[['baz', 'zoo'], ['A', 'B', 'C']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
names=[None, 'bar'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('values', [
['bar', 'baz'], np.array(['bar', 'baz']),
pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values_nans(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='zoo', columns='foo', values=values)
else:
result = pd.pivot(df, index='zoo', columns='foo', values=values)
data = [[np.nan, 'A', np.nan, 4],
[np.nan, 'C', np.nan, 6],
[np.nan, 'B', np.nan, 5],
['A', np.nan, 1, np.nan],
['B', np.nan, 2, np.nan],
['C', np.nan, 3, np.nan]]
index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')
columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[None, 'foo'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
    @pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails '
                              'with KeyError GH#19966')
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_multiindex(self, method):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
data = [['one', 'A', 1, 'x'],
['one', 'B', 2, 'y'],
['one', 'C', 3, 'z'],
['two', 'A', 4, 'q'],
['two', 'B', 5, 'w'],
['two', 'C', 6, 't']]
columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(data=data, index=index, columns=columns, dtype='object')
if method:
result = df.pivot(index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
else:
result = pd.pivot(df,
index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
data = {'A': Series([1, 4], index=['one', 'two']),
'B': Series([2, 5], index=['one', 'two']),
'C': Series([3, 6], index=['one', 'two'])}
expected = DataFrame(data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tuple_of_values(self, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
with pytest.raises(KeyError):
# tuple is seen as a single column name
if method:
df.pivot(index='zoo', columns='foo', values=('bar', 'baz'))
else:
pd.pivot(df, index='zoo', columns='foo', values=('bar', 'baz'))
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
columns=['C'],
margins_col='All'):
col_margins = result.loc[result.index[:-1], margins_col]
expected_col_margins = self.data.groupby(index)[values_col].mean()
tm.assert_series_equal(col_margins, expected_col_margins,
check_names=False)
assert col_margins.name == margins_col
result = result.sort_index()
index_margins = result.loc[(margins_col, '')].iloc[:-1]
expected_ix_margins = self.data.groupby(columns)[values_col].mean()
tm.assert_series_equal(index_margins, expected_ix_margins,
check_names=False)
assert index_margins.name == (margins_col, '')
grand_total_margins = result.loc[(margins_col, ''), margins_col]
expected_total_margins = self.data[values_col].mean()
assert grand_total_margins == expected_total_margins
# column specified
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean)
_check_output(result, 'D')
# Set a different margins_name (not 'All')
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean,
margins_name='Totals')
_check_output(result, 'D', margins_col='Totals')
# no column specified
table = self.data.pivot_table(index=['A', 'B'], columns='C',
margins=True, aggfunc=np.mean)
for value_col in table.columns.levels[0]:
_check_output(table[value_col], value_col)
# no col
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc=np.mean)
for value_col in table.columns:
totals = table.loc[('All', ''), value_col]
assert totals == self.data[value_col].mean()
# no rows
rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,
aggfunc=np.mean)
assert isinstance(rtable, Series)
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc='mean')
for item in ['DD', 'EE', 'FF']:
totals = table.loc[('All', ''), item]
assert totals == self.data[item].mean()
def test_margins_dtype(self):
# GH 17013
df = self.data.copy()
df[['D', 'E', 'F']] = np.arange(len(df) * 3).reshape(len(df), 3)
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [12, 21, 3, 9, 45],
'shiny': [33, 0, 36, 51, 120]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = df.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=np.sum, fill_value=0)
tm.assert_frame_equal(expected, result)
@pytest.mark.xfail(reason='GH#17035 (len of floats is casted back to '
'floats)')
def test_margins_dtype_len(self):
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [1, 1, 2, 1, 5],
'shiny': [2, 0, 2, 2, 6]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=len, fill_value=0)
tm.assert_frame_equal(expected, result)
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
d = date.min
data = list(product(['foo', 'bar'], ['A', 'B', 'C'], ['x1', 'x2'],
[d + timedelta(i)
for i in range(20)], [1.0]))
df = DataFrame(data)
table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])
df2 = df.rename(columns=str)
table2 = df2.pivot_table(
values='4', index=['0', '1', '3'], columns=['2'])
tm.assert_frame_equal(table, table2, check_names=False)
def test_pivot_no_level_overlap(self):
# GH #1181
data = DataFrame({'a': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'] * 2,
'b': [0, 0, 0, 0, 1, 1, 1, 1] * 2,
'c': (['foo'] * 4 + ['bar'] * 4) * 2,
'value': np.random.randn(16)})
table = data.pivot_table('value', index='a', columns=['b', 'c'])
grouped = data.groupby(['a', 'b', 'c'])['value'].mean()
expected = grouped.unstack('b').unstack('c').dropna(axis=1, how='all')
tm.assert_frame_equal(table, expected)
def test_pivot_columns_lexsorted(self):
n = 10000
dtype = np.dtype([
("Index", object),
("Symbol", object),
("Year", int),
("Month", int),
("Day", int),
("Quantity", int),
("Price", float),
])
products = np.array([
('SP500', 'ADBE'),
('SP500', 'NVDA'),
('SP500', 'ORCL'),
('NDQ100', 'AAPL'),
('NDQ100', 'MSFT'),
('NDQ100', 'GOOG'),
('FTSE', 'DGE.L'),
('FTSE', 'TSCO.L'),
('FTSE', 'GSK.L'),
], dtype=[('Index', object), ('Symbol', object)])
items = np.empty(n, dtype=dtype)
iproduct = np.random.randint(0, len(products), n)
items['Index'] = products['Index'][iproduct]
items['Symbol'] = products['Symbol'][iproduct]
dr = pd.date_range(date(2000, 1, 1),
date(2010, 12, 31))
dates = dr[np.random.randint(0, len(dr), n)]
items['Year'] = dates.year
items['Month'] = dates.month
items['Day'] = dates.day
items['Price'] = np.random.lognormal(4.0, 2.0, n)
df = DataFrame(items)
pivoted = df.pivot_table('Price', index=['Month', 'Day'],
columns=['Index', 'Symbol', 'Year'],
aggfunc='mean')
assert pivoted.columns.is_monotonic
def test_pivot_complex_aggfunc(self):
f = OrderedDict([('D', ['std']), ('E', ['sum'])])
expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')
result = self.data.pivot_table(index='A', columns='B', aggfunc=f)
tm.assert_frame_equal(result, expected)
def test_margins_no_values_no_cols(self):
# Regression test on pivot table: no values or cols passed.
result = self.data[['A', 'B']].pivot_table(
index=['A', 'B'], aggfunc=len, margins=True)
result_list = result.tolist()
assert sum(result_list[:-1]) == result_list[-1]
def test_margins_no_values_two_rows(self):
# Regression test on pivot table: no values passed but rows are a
# multi-index
result = self.data[['A', 'B', 'C']].pivot_table(
index=['A', 'B'], columns='C', aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_margins_no_values_one_row_one_col(self):
# Regression test on pivot table: no values passed but row and col
# defined
result = self.data[['A', 'B']].pivot_table(
index='A', columns='B', aggfunc=len, margins=True)
assert result.All.tolist() == [4.0, 7.0, 11.0]
def test_margins_no_values_two_row_two_cols(self):
# Regression test on pivot table: no values passed but rows and cols
# are multi-indexed
self.data['D'] = ['a', 'b', 'c', 'd',
'e', 'f', 'g', 'h', 'i', 'j', 'k']
result = self.data[['A', 'B', 'C', 'D']].pivot_table(
index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_pivot_table_with_margins_set_margin_name(self):
# see gh-3335
for margin_name in ['foo', 'one', 666, None, ['a', 'b']]:
with pytest.raises(ValueError):
# multi-index index
pivot_table(self.data, values='D', index=['A', 'B'],
columns=['C'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# multi-index column
pivot_table(self.data, values='D', index=['C'],
columns=['A', 'B'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# non-multi-index index/column
pivot_table(self.data, values='D', index=['A'],
columns=['B'], margins=True,
margins_name=margin_name)
def test_pivot_timegrouper(self):
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
            # buyer names reconstructed to match the expected totals below (Carl=10, Joe=18, Mark=3)
            'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 1, 1),
datetime(2013, 1, 1),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 12, 2),
datetime(2013, 12, 2), ]}).set_index('Date')
expected = DataFrame(np.array([10, 18, 3], dtype='int64')
.reshape(1, 3),
index=[datetime(2013, 12, 31)],
                             columns='Carl Joe Mark'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='A'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='A'),
values='Quantity', aggfunc=np.sum)
|
tm.assert_frame_equal(result, expected.T)
|
pandas.util.testing.assert_frame_equal
|
# Author: KTH dESA Last modified by <NAME>
# Date: 26 November 2018
# Python version: 3.7
import os
import logging
import pandas as pd
from math import ceil, pi, exp, log, sqrt, radians, cos, sin, asin
from pyproj import Proj
import numpy as np
from collections import defaultdict
logging.basicConfig(format='%(asctime)s\t\t%(message)s', level=logging.DEBUG)
# General
LHV_DIESEL = 9.9445485 # (kWh/l) lower heating value
HOURS_PER_YEAR = 8760
# Columns in settlements file must match these exactly
SET_COUNTRY = 'Country' # This cannot be changed, lots of code will break
SET_X = 'X' # Coordinate in kilometres
SET_Y = 'Y' # Coordinate in kilometres
SET_X_DEG = 'X_deg' # Coordinates in degrees
SET_Y_DEG = 'Y_deg'
SET_POP = 'Pop' # Population in people per point (equally, people per km2)
SET_POP_CALIB = 'PopStartYear' # Calibrated population to reference year, same units
SET_POP_FUTURE = 'PopEndYear' # Projected future population, same units
SET_GRID_DIST_CURRENT = 'GridDistCurrent' # Distance in km from current grid
SET_GRID_DIST_PLANNED = 'GridDistPlan' # Distance in km from current and future grid
SET_ROAD_DIST = 'RoadDist' # Distance in km from road network
SET_NIGHT_LIGHTS = 'NightLights' # Intensity of night time lights (from NASA), range 0 - 63
SET_TRAVEL_HOURS = 'TravelHours' # Travel time to large city in hours
SET_GHI = 'GHI' # Global horizontal irradiance in kWh/m2/day
SET_WINDVEL = 'WindVel' # Wind velocity in m/s
SET_WINDCF = 'WindCF' # Wind capacity factor as percentage (range 0 - 1)
SET_HYDRO = 'Hydropower' # Hydropower potential in kW
SET_HYDRO_DIST = 'HydropowerDist' # Distance to hydropower site in km
SET_HYDRO_FID = 'HydropowerFID' # the unique tag for each hydropower site, to avoid over-utilising it
SET_SUBSTATION_DIST = 'SubstationDist'
SET_ELEVATION = 'Elevation' # in metres
SET_SLOPE = 'Slope' # in degrees
SET_LAND_COVER = 'LandCover'
SET_ROAD_DIST_CLASSIFIED = 'RoadDistClassified'
SET_SUBSTATION_DIST_CLASSIFIED = 'SubstationDistClassified'
SET_ELEVATION_CLASSIFIED = 'ElevationClassified'
SET_SLOPE_CLASSIFIED = 'SlopeClassified'
SET_LAND_COVER_CLASSIFIED = 'LandCoverClassified'
SET_COMBINED_CLASSIFICATION = 'GridClassification'
SET_GRID_PENALTY = 'GridPenalty'
SET_URBAN = 'IsUrban' # Whether the site is urban (0 or 1)
SET_ENERGY_PER_CELL = 'EnergyPerSettlement'
SET_NUM_PEOPLE_PER_HH = 'NumPeoplePerHH'
SET_ELEC_CURRENT = 'ElecStart' # If the site is currently electrified (0 or 1)
SET_ELEC_FUTURE = 'Elec_Status' # If the site has the potential to be 'easily' electrified in future
SET_ELEC_FUTURE_GRID = "Elec_Initial_Status_Grid"
SET_ELEC_FUTURE_OFFGRID = "Elec_Init_Status_Offgrid"
SET_ELEC_FUTURE_ACTUAL = "Actual_Elec_Status_"
SET_ELEC_FINAL_GRID = "GridElecIn"
SET_ELEC_FINAL_OFFGRID = "OffGridElecIn"
SET_NEW_CONNECTIONS = 'NewConnections' # Number of new people with electricity connections
SET_MIN_GRID_DIST = 'MinGridDist'
SET_LCOE_GRID = 'Grid_extension' # All lcoes in USD/kWh
SET_LCOE_SA_PV = 'SA_PV'
SET_LCOE_SA_DIESEL = 'SA_Diesel'
SET_LCOE_MG_WIND = 'MG_Wind'
SET_LCOE_MG_DIESEL = 'MG_Diesel'
SET_LCOE_MG_PV = 'MG_PV'
SET_LCOE_MG_HYDRO = 'MG_Hydro'
SET_GRID_LCOE_Round1 = "Grid_lcoe_PreElec"
SET_MIN_OFFGRID = 'Minimum_Tech_Off_grid' # The technology with lowest lcoe (excluding grid)
SET_MIN_OVERALL = 'MinimumOverall' # Same as above, but including grid
SET_MIN_OFFGRID_LCOE = 'Minimum_LCOE_Off_grid' # The lcoe value for minimum tech
SET_MIN_OVERALL_LCOE = 'MinimumOverallLCOE' # The lcoe value for overall minimum
SET_MIN_OVERALL_CODE = 'MinimumOverallCode' # And a code from 1 - 7 to represent that option
SET_MIN_CATEGORY = 'MinimumCategory' # The category with minimum lcoe (grid, minigrid or standalone)
SET_NEW_CAPACITY = 'NewCapacity' # Capacity in kW
SET_INVESTMENT_COST = 'InvestmentCost' # The investment cost in USD
SET_INVESTMENT_COST_OFFGRID = "InvestmentOffGrid"
SET_CONFLICT = "Conflict"
SET_ELEC_ORDER = "ElectrificationOrder"
SET_DYNAMIC_ORDER = "Electrification_Wave"
SET_LIMIT = "ElecStatusIn"
SET_GRID_REACH_YEAR = "GridReachYear"
SET_MIN_OFFGRID_CODE = "Off_Grid_Code"
SET_ELEC_FINAL_CODE = "FinalElecCode"
SET_DIST_TO_TRANS = "TransformerDist"
SET_TOTAL_ENERGY_PER_CELL = "TotalEnergyPerCell" # all previous + current timestep
SET_RESIDENTIAL_DEMAND = "ResidentialDemand"
SET_AGRI_DEMAND = "AgriDemand"
SET_HEALTH_DEMAND = "HealthDemand"
SET_EDU_DEMAND = "EducationDemand"
SET_COMMERCIAL_DEMAND = "CommercialDemand"
SET_GRID_CELL_AREA = 'GridCellArea'
# Columns in the specs file must match these exactly
SPE_COUNTRY = 'Country'
SPE_POP = 'PopStartYear' # The actual population in the base year
SPE_URBAN = 'UrbanRatioStartYear' # The ratio of urban population (range 0 - 1) in base year
SPE_POP_FUTURE = 'PopEndYear'
SPE_URBAN_FUTURE = 'UrbanRatioEndYear'
SPE_URBAN_MODELLED = 'UrbanRatioModelled' # The urban ratio in the model after calibration (for comparison)
SPE_URBAN_CUTOFF = 'UrbanCutOff' # The urban cutoff population calibrated by the model, in people per km2
SPE_URBAN_GROWTH = 'UrbanGrowth' # The urban growth rate as a simple multiplier (urban pop future / urban pop present)
SPE_RURAL_GROWTH = 'RuralGrowth' # Same as for urban
SPE_NUM_PEOPLE_PER_HH_RURAL = 'NumPeoplePerHHRural'
SPE_NUM_PEOPLE_PER_HH_URBAN = 'NumPeoplePerHHUrban'
SPE_DIESEL_PRICE_LOW = 'DieselPriceLow' # Diesel price in USD/litre
SPE_DIESEL_PRICE_HIGH = 'DieselPriceHigh' # Same, but for the high price forecast variant
SPE_GRID_PRICE = 'GridPrice' # Grid price of electricity in USD/kWh
SPE_GRID_CAPACITY_INVESTMENT = 'GridCapacityInvestmentCost' # grid capacity investment costs from TEMBA, in USD/kW
SPE_GRID_LOSSES = 'GridLosses' # As a ratio (0 - 1)
SPE_BASE_TO_PEAK = 'BaseToPeak' # As a ratio (0 - 1)
SPE_EXISTING_GRID_COST_RATIO = 'ExistingGridCostRatio'
SPE_MAX_GRID_DIST = 'MaxGridDist'
SPE_ELEC = 'ElecActual' # Actual current percentage electrified population (0 - 1)
SPE_ELEC_MODELLED = 'ElecModelled' # The modelled version after calibration (for comparison)
SPE_MIN_NIGHT_LIGHTS = 'MinNightLights'
SPE_MAX_GRID_EXTENSION_DIST = 'MaxGridExtensionDist'
SPE_MAX_ROAD_DIST = 'MaxRoadDist'
SPE_POP_CUTOFF1 = 'PopCutOffRoundOne'
SPE_POP_CUTOFF2 = 'PopCutOffRoundTwo'
SPE_CAP_COST_MG_PV = "Cap_Cost_MG_PV"
SPE_ELEC_LIMIT = "ElecLimit"
SPE_INVEST_LIMIT = "InvestmentLimit"
SPE_DIST_TO_TRANS = "DistToTrans"
SPE_START_YEAR = "StartYear"
SPE_END_YEAR = "EndYEar"
SPE_TIMESTEP = "TimeStep"
class Technology:
"""
Used to define the parameters for each electricity access technology, and to calculate the LCOE depending on
input parameters.
"""
discount_rate = 0.12
# grid_cell_area = 0.01 # in km2, normally 1km2
mv_line_cost = 9000 # USD/km
lv_line_cost = 5000 # USD/km
mv_line_capacity = 50 # kW/line
lv_line_capacity = 10 # kW/line
lv_line_max_length = 30 # km
hv_line_cost = 120000 # USD/km
mv_line_max_length = 50 # km
hv_lv_transformer_cost = 3500 # USD/unit
mv_increase_rate = 0.1 # percentage
existing_grid_cost_ratio = 0.1 # percentage
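    # Illustrative call order only (all numeric values below are placeholder assumptions, not
    # calibrated model inputs): class-wide defaults are set once via set_default_values, then a
    # Technology instance is created per supply option and get_lcoe is evaluated per settlement row.
    #   Technology.set_default_values(base_year=2015, start_year=2018, end_year=2030,
    #                                 discount_rate=0.12, mv_line_cost=9000, lv_line_cost=5000,
    #                                 mv_line_capacity=50, lv_line_capacity=10, lv_line_max_length=30,
    #                                 hv_line_cost=120000, mv_line_max_length=50,
    #                                 hv_lv_transformer_cost=3500, mv_increase_rate=0.1)
    #   mg_pv = Technology(tech_life=20, base_to_peak_load_ratio=0.9, om_costs=0.02,
    #                      capital_cost=4300, om_of_td_lines=0.02)
    #   lcoe = mg_pv.get_lcoe(energy_per_cell=50000, people=500, num_people_per_hh=5,
    #                         start_year=2018, end_year=2030, new_connections=500,
    #                         total_energy_per_cell=50000, prev_code=99, grid_cell_area=1,
    #                         capacity_factor=0.2)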
def __init__(self,
tech_life, # in years
base_to_peak_load_ratio,
distribution_losses=0, # percentage
connection_cost_per_hh=0, # USD/hh
om_costs=0.0, # OM costs as percentage of capital costs
capital_cost=0, # USD/kW
capacity_factor=0.9, # percentage
grid_penalty_ratio=1, # multiplier
efficiency=1.0, # percentage
diesel_price=0.0, # USD/litre
grid_price=0.0, # USD/kWh for grid electricity
standalone=False,
existing_grid_cost_ratio=0.1, # percentage
grid_capacity_investment=0.0, # USD/kW for on-grid capacity investments (excluding grid itself)
diesel_truck_consumption=0, # litres/hour
diesel_truck_volume=0, # litres
om_of_td_lines=0): # percentage
self.distribution_losses = distribution_losses
self.connection_cost_per_hh = connection_cost_per_hh
self.base_to_peak_load_ratio = base_to_peak_load_ratio
self.tech_life = tech_life
self.om_costs = om_costs
self.capital_cost = capital_cost
self.capacity_factor = capacity_factor
self.grid_penalty_ratio = grid_penalty_ratio
self.efficiency = efficiency
self.diesel_price = diesel_price
self.grid_price = grid_price
self.standalone = standalone
self.existing_grid_cost_ratio = existing_grid_cost_ratio
self.grid_capacity_investment = grid_capacity_investment
self.diesel_truck_consumption = diesel_truck_consumption
self.diesel_truck_volume = diesel_truck_volume
self.om_of_td_lines = om_of_td_lines
@classmethod
def set_default_values(cls, base_year, start_year, end_year, discount_rate, mv_line_cost, lv_line_cost,
mv_line_capacity, lv_line_capacity, lv_line_max_length, hv_line_cost, mv_line_max_length,
hv_lv_transformer_cost, mv_increase_rate):
cls.base_year = base_year
cls.start_year = start_year
cls.end_year = end_year
cls.discount_rate = discount_rate
# cls.grid_cell_area = grid_cell_area
cls.mv_line_cost = mv_line_cost
cls.lv_line_cost = lv_line_cost
cls.mv_line_capacity = mv_line_capacity
cls.lv_line_capacity = lv_line_capacity
cls.lv_line_max_length = lv_line_max_length
cls.hv_line_cost = hv_line_cost
cls.mv_line_max_length = mv_line_max_length
cls.hv_lv_transformer_cost = hv_lv_transformer_cost
cls.mv_increase_rate = mv_increase_rate
def get_lcoe(self, energy_per_cell, people, num_people_per_hh, start_year, end_year, new_connections,
total_energy_per_cell, prev_code, grid_cell_area, conf_status=0, additional_mv_line_length=0, capacity_factor=0,
grid_penalty_ratio=1, mv_line_length=0, travel_hours=0, elec_loop=0, get_investment_cost=False,
get_investment_cost_lv=False, get_investment_cost_mv=False, get_investment_cost_hv=False,
get_investment_cost_transformer=False, get_investment_cost_connection=False):
"""
Calculates the LCOE depending on the parameters. Optionally calculates the investment cost instead.
The only required parameters are energy_per_cell, people and num_people_per_hh
additional_mv_line_length required for grid
capacity_factor required for PV and wind
mv_line_length required for hydro
travel_hours required for diesel
"""
if people == 0:
# If there are no people, the investment cost is zero.
if get_investment_cost:
return 0
# Otherwise we set the people low (prevent div/0 error) and continue.
else:
people = 0.00001
if energy_per_cell == 0:
# If there is no demand, the investment cost is zero.
if get_investment_cost:
return 0
            # Otherwise we set the demand very low (prevent div/0 error) and continue.
else:
energy_per_cell = 0.000000000001
if grid_penalty_ratio == 0:
grid_penalty_ratio = self.grid_penalty_ratio
# If a new capacity factor isn't given, use the class capacity factor (for hydro, diesel etc)
if capacity_factor == 0:
capacity_factor = self.capacity_factor
def distribution_network(people, energy_per_cell):
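            # Sizes the local T&D network for a single settlement: converts annual demand into
            # average and peak load, estimates the number and length of MV/LV (and extra HV) lines
            # and transformers required, and returns those together with annual generation and peak load.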
if energy_per_cell <= 0:
energy_per_cell = 0.0001
if people <= 0:
people = 0.0001
consumption = energy_per_cell # kWh/year
average_load = consumption / (1 - self.distribution_losses) / HOURS_PER_YEAR # kW
peak_load = average_load / self.base_to_peak_load_ratio # kW
no_mv_lines = peak_load / self.mv_line_capacity
no_lv_lines = peak_load / self.lv_line_capacity
lv_networks_lim_capacity = no_lv_lines / no_mv_lines
lv_networks_lim_length = ((grid_cell_area / no_mv_lines) / (self.lv_line_max_length / sqrt(2))) ** 2
actual_lv_lines = min([people / num_people_per_hh, max([lv_networks_lim_capacity, lv_networks_lim_length])])
hh_per_lv_network = (people / num_people_per_hh) / (actual_lv_lines * no_mv_lines)
lv_unit_length = sqrt(grid_cell_area / (people / num_people_per_hh)) * sqrt(2) / 2
lv_lines_length_per_lv_network = 1.333 * hh_per_lv_network * lv_unit_length
total_lv_lines_length = no_mv_lines * actual_lv_lines * lv_lines_length_per_lv_network
line_reach = (grid_cell_area / no_mv_lines) / (2 * sqrt(grid_cell_area / no_lv_lines))
total_length_of_lines = min([line_reach, self.mv_line_max_length]) * no_mv_lines
additional_hv_lines = max([0, round(sqrt(grid_cell_area) /
(2 * min([line_reach, self.mv_line_max_length])) / 10, 3) - 1])
hv_lines_total_length = (sqrt(grid_cell_area) / 2) * additional_hv_lines * sqrt(grid_cell_area)
num_transformers = additional_hv_lines + no_mv_lines + (no_mv_lines * actual_lv_lines)
generation_per_year = average_load * HOURS_PER_YEAR
return hv_lines_total_length, total_length_of_lines, total_lv_lines_length, \
num_transformers, generation_per_year, peak_load
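        # For settlements with existing connections (prev_code 1 or 4-7), the network is sized for the
        # full cumulative demand and the network already serving previously connected people is
        # subtracted, so that only the incremental T&D build is costed in this time step.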
if people != new_connections and (prev_code == 1 or prev_code == 4 or prev_code == 5 or
prev_code == 6 or prev_code == 7):
hv_lines_total_length1, total_length_of_lines1, total_lv_lines_length1, \
num_transformers1, generation_per_year1, peak_load1 = distribution_network(people, total_energy_per_cell)
hv_lines_total_length2, total_length_of_lines2, total_lv_lines_length2, \
num_transformers2, generation_per_year2, peak_load2 = \
distribution_network(people=(people - new_connections),
energy_per_cell=(total_energy_per_cell - energy_per_cell))
hv_lines_total_length3, total_length_of_lines3, total_lv_lines_length3, \
num_transformers3, generation_per_year3, peak_load3 = \
distribution_network(people=new_connections,
energy_per_cell=energy_per_cell)
hv_lines_total_length = hv_lines_total_length1 - hv_lines_total_length2
total_length_of_lines = total_length_of_lines1 - total_length_of_lines2
total_lv_lines_length = total_lv_lines_length1 - total_lv_lines_length2
num_transformers = num_transformers1 - num_transformers2
generation_per_year = generation_per_year1 - generation_per_year2
peak_load = peak_load1 - peak_load2
# hv_lines_total_length = hv_lines_total_length3
# total_length_of_lines = total_length_of_lines3
# total_lv_lines_length = total_lv_lines_length3
# num_transformers = num_transformers3
# generation_per_year = generation_per_year3
# peak_load = peak_load3
else:
hv_lines_total_length, total_length_of_lines, total_lv_lines_length, \
num_transformers, generation_per_year, peak_load = distribution_network(people, energy_per_cell)
conf_grid_pen = {0: 1, 1: 1.18, 2: 1.39, 3: 1.6, 4: 2}
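        # Conflict penalty multipliers keyed by conf_status (0 = no conflict ... 4 = most severe),
        # applied to the grid line costs below; e.g. conf_status 2 inflates HV/MV/LV line costs by 39%.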
# The investment and O&M costs are different for grid and non-grid solutions
if self.grid_price > 0:
td_investment_cost = hv_lines_total_length * (
self.hv_line_cost * conf_grid_pen[conf_status]) * \
(1 + self.existing_grid_cost_ratio * elec_loop) + total_length_of_lines * \
(self.mv_line_cost * conf_grid_pen[conf_status]) * \
(1 + self.existing_grid_cost_ratio * elec_loop) + \
total_lv_lines_length * (self.lv_line_cost * conf_grid_pen[conf_status]) + \
num_transformers * self.hv_lv_transformer_cost + \
(new_connections / num_people_per_hh) * self.connection_cost_per_hh + \
(1 + self.existing_grid_cost_ratio * elec_loop) * additional_mv_line_length * (
(self.mv_line_cost * conf_grid_pen[conf_status]) * (
1 + self.mv_increase_rate) ** ((additional_mv_line_length / 5) - 1))
td_investment_cost = td_investment_cost * grid_penalty_ratio
td_om_cost = td_investment_cost * self.om_of_td_lines
total_investment_cost = td_investment_cost
total_om_cost = td_om_cost
fuel_cost = self.grid_price
else:
conflict_sa_pen = {0: 1, 1: 1, 2: 1, 3: 1, 4: 1}
conflict_mg_pen = {0: 1, 1: 1.18, 2: 1.39, 3: 1.6, 4: 2}
total_lv_lines_length *= 0 if self.standalone else 0.75
mv_total_line_cost = self.mv_line_cost * mv_line_length * conflict_sa_pen[
conf_status] if self.standalone \
else self.mv_line_cost * mv_line_length * conflict_mg_pen[conf_status]
lv_total_line_cost = self.lv_line_cost * total_lv_lines_length * conflict_sa_pen[
conf_status] if self.standalone \
else self.lv_line_cost * total_lv_lines_length * conflict_mg_pen[conf_status]
installed_capacity = peak_load / capacity_factor
capital_investment = installed_capacity * self.capital_cost * conflict_sa_pen[
conf_status] if self.standalone \
else installed_capacity * self.capital_cost * conflict_mg_pen[conf_status]
td_investment_cost = mv_total_line_cost + lv_total_line_cost + (
new_connections / num_people_per_hh) * self.connection_cost_per_hh
td_om_cost = td_investment_cost * self.om_of_td_lines * conflict_sa_pen[conf_status] if self.standalone \
else td_investment_cost * self.om_of_td_lines * conflict_mg_pen[conf_status]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost * conflict_sa_pen[conf_status] * self.om_costs *
conflict_sa_pen[conf_status] * installed_capacity) if self.standalone \
else td_om_cost + (self.capital_cost * conflict_mg_pen[conf_status] * self.om_costs *
conflict_mg_pen[conf_status] * installed_capacity)
# If a diesel price has been passed, the technology is diesel
# And we apply the Szabo formula to calculate the transport cost for the diesel
# p = (p_d + 2*p_d*consumption*time/volume)*(1/mu)*(1/LHVd)
# Otherwise it's hydro/wind etc with no fuel cost
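        # Illustrative check of the Szabo formula with hypothetical numbers (not model inputs):
        # p_d = 0.5 USD/l, truck consumption 14 l/h, 2 h travel (no conflict penalty), 15,000 l truck
        # volume, efficiency 0.33 gives (0.5 + 2*0.5*14*2/15000) / 9.9445485 / 0.33 ~= 0.153 USD/kWh.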
conf_diesel_pen = {0: 1, 1: 1.18, 2: 1.39, 3: 1.6, 4: 2}
if self.diesel_price > 0:
fuel_cost = (self.diesel_price + 2 * self.diesel_price * self.diesel_truck_consumption * (
travel_hours * conf_diesel_pen[conf_status]) /
self.diesel_truck_volume) / LHV_DIESEL / self.efficiency
else:
fuel_cost = 0
# Perform the time-value LCOE calculation
project_life = end_year - self.base_year + 1
reinvest_year = 0
step = start_year - self.base_year
# If the technology life is less than the project life, we will have to invest twice to buy it again
if self.tech_life + step < project_life:
reinvest_year = self.tech_life + step
year = np.arange(project_life)
el_gen = generation_per_year * np.ones(project_life)
el_gen[0:step] = 0
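        # Conflict-adjusted discount rates: currently every conf_status branch uses the base 0.12,
        # with the commented values (0.133-0.171) kept as the alternative conflict premiums.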
if conf_status == 1:
self.discount_rate = 0.12 # 0.133
discount_factor = (1 + self.discount_rate) ** year
elif conf_status == 2:
self.discount_rate = 0.12 # 0.145
discount_factor = (1 + self.discount_rate) ** year
elif conf_status == 3:
self.discount_rate = 0.12 # 0.158
discount_factor = (1 + self.discount_rate) ** year
        elif conf_status == 4:
self.discount_rate = 0.12 # 0.171
discount_factor = (1 + self.discount_rate) ** year
else:
discount_factor = (1 + self.discount_rate) ** year
#discount_factor = (1 + self.discount_rate) ** year
investments = np.zeros(project_life)
investments[step] = total_investment_cost
# Calculate the year of re-investment if tech_life is smaller than project life
if reinvest_year:
investments[reinvest_year] = total_investment_cost
# Calculate salvage value if tech_life is bigger than project life
salvage = np.zeros(project_life)
if reinvest_year > 0:
used_life = (project_life - step) - self.tech_life
else:
used_life = project_life - step - 1
salvage[-1] = total_investment_cost * (1 - used_life / self.tech_life)
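        # Straight-line salvage: e.g. an asset with a 30-year tech_life that has been used for
        # 24 years by the project end retains 6/30 = 20% of its investment cost as salvage value.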
# salvage[project_life - 1] = total_investment_cost * (1 - used_life / self.tech_life)
operation_and_maintenance = total_om_cost * np.ones(project_life)
operation_and_maintenance[0:step] = 0
fuel = el_gen * fuel_cost
fuel[0:step] = 0
# So we also return the total investment cost for this number of people
if get_investment_cost:
discounted_investments = investments / discount_factor
return np.sum(discounted_investments) + (self.grid_capacity_investment * peak_load)
elif get_investment_cost_lv:
return total_lv_lines_length * (self.lv_line_cost * conf_grid_pen[conf_status])
elif get_investment_cost_mv:
return total_length_of_lines * (self.mv_line_cost * conf_grid_pen[conf_status]) * \
(1 + self.existing_grid_cost_ratio * elec_loop) + (1 + self.existing_grid_cost_ratio * elec_loop) * additional_mv_line_length * (
(self.mv_line_cost * conf_grid_pen[conf_status]) * (
1 + self.mv_increase_rate) ** ((additional_mv_line_length / 5) - 1))
elif get_investment_cost_hv:
return hv_lines_total_length * (self.hv_line_cost * conf_grid_pen[conf_status]) * \
(1 + self.existing_grid_cost_ratio * elec_loop)
elif get_investment_cost_transformer:
return num_transformers * self.hv_lv_transformer_cost
elif get_investment_cost_connection:
return (new_connections / num_people_per_hh) * self.connection_cost_per_hh
else:
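            # Levelised cost of electricity: discounted lifetime costs (investment + O&M + fuel
            # minus salvage) divided by discounted lifetime generation, in USD/kWh.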
discounted_costs = (investments + operation_and_maintenance + fuel - salvage) / discount_factor
discounted_generation = el_gen / discount_factor
return np.sum(discounted_costs) / np.sum(discounted_generation)
class SettlementProcessor:
"""
Processes the dataframe and adds all the columns to determine the cheapest option and the final costs and summaries
"""
def __init__(self, path):
try:
self.df = pd.read_csv(path)
except FileNotFoundError:
print('You need to first split into a base directory and prep!')
raise
def condition_df(self, country):
"""
Do any initial data conditioning that may be required.
"""
logging.info('Ensure that columns that are supposed to be numeric are numeric')
self.df[SET_GHI] = pd.to_numeric(self.df[SET_GHI], errors='coerce')
self.df[SET_WINDVEL] = pd.to_numeric(self.df[SET_WINDVEL], errors='coerce')
self.df[SET_NIGHT_LIGHTS] = pd.to_numeric(self.df[SET_NIGHT_LIGHTS], errors='coerce')
self.df[SET_ELEVATION] = pd.to_numeric(self.df[SET_ELEVATION], errors='coerce')
self.df[SET_SLOPE] = pd.to_numeric(self.df[SET_SLOPE], errors='coerce')
self.df[SET_LAND_COVER] = pd.to_numeric(self.df[SET_LAND_COVER], errors='coerce')
# self.df[SET_GRID_DIST_CURRENT] = pd.to_numeric(self.df[SET_GRID_DIST_CURRENT], errors='coerce')
# self.df[SET_GRID_DIST_PLANNED] = pd.to_numeric(self.df[SET_GRID_DIST_PLANNED], errors='coerce')
self.df[SET_SUBSTATION_DIST] =
|
pd.to_numeric(self.df[SET_SUBSTATION_DIST], errors='coerce')
|
pandas.to_numeric
|
def concat_networks(p_in_dir_data
, p_in_dir_pred
, p_out_file
, file_suffix
, flag_matrix
, p_in_reg
, p_in_target
, flag_method
, l_p_in_net
, nbr_fold
):
from pandas import read_csv, concat, DataFrame, pivot_table
from json import load
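    # Concatenates predicted network edge lists (tab-separated files whose first two columns are the
    # regulator and target) produced by different runs into a single output file; the branch taken
    # depends on flag_method.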
if flag_method == 'with_and_without_de':
df_net_with_de = read_csv(l_p_in_net[0], header=None, sep='\t')
df_net_with_de.index = [(reg, target) for reg, target in zip(list(df_net_with_de.iloc[:, 0])
, list(df_net_with_de.iloc[:, 1]))]
df_net_without_de = read_csv(l_p_in_net[1], header=None, sep='\t')
df_net_without_de.index = [(reg, target) for reg, target in zip(list(df_net_without_de.iloc[:, 0]), list(df_net_without_de.iloc[:, 1]))]
# remove edges that were predicted using DE network
df_net_without_de_filtered = df_net_without_de.loc[~df_net_without_de.index.isin(df_net_with_de.index), :]
df_net_all = concat([df_net_with_de, df_net_without_de_filtered], axis='index')
df_net_all.to_csv(p_out_file, header=False, index=False, sep='\t')
if flag_method == 'a':
df_net_all = DataFrame()
for p_df_net in l_p_in_net:
if p_df_net != 'NONE':
df_net = read_csv(p_df_net, header=None, sep='\t')
df_net_all = concat([df_net, df_net_all], axis='index')
df_net_all.to_csv(p_out_file, header=False, index=False, sep='\t')
elif flag_method == 'concat_cv':
# concatenate the sub-networks
df_net =
|
DataFrame()
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from nipype.interfaces.nilearn import NilearnBaseInterface
from nipype.interfaces.base import (
BaseInterfaceInputSpec, TraitedSpec,
File, SimpleInterface
)
class AtlasConnectivityInputSpec(BaseInterfaceInputSpec):
timeseries_file = File(exists=True, mandatory=True,
desc='The 4d file being used to extract timeseries data')
atlas_file = File(exists=True, mandatory=True,
desc='The atlas image with each roi given a unique index')
atlas_lut = File(exists=True, mandatory=True,
desc='The atlas lookup table to match the atlas image')
class AtlasConnectivityOutputSpec(TraitedSpec):
correlation_matrix = File(exists=True,
desc='roi-roi fisher z transformed correlation matrix')
correlation_fig = File(exists=True,
desc='svg of roi-roi fisher z transformed correlation matrix')
class AtlasConnectivity(NilearnBaseInterface, SimpleInterface):
"""Calculates correlations between regions of interest"""
input_spec = AtlasConnectivityInputSpec
output_spec = AtlasConnectivityOutputSpec
def _run_interface(self, runtime):
from nilearn.input_data import NiftiLabelsMasker
from nilearn.connectome import ConnectivityMeasure
from sklearn.covariance import EmpiricalCovariance
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from mne.viz import plot_connectivity_circle
import re
plt.switch_backend('Agg')
# extract timeseries from every label
masker = NiftiLabelsMasker(labels_img=self.inputs.atlas_file,
standardize=True, verbose=1)
timeseries = masker.fit_transform(self.inputs.timeseries_file)
# create correlation matrix
correlation_measure = ConnectivityMeasure(cov_estimator=EmpiricalCovariance(),
kind="correlation")
correlation_matrix = correlation_measure.fit_transform([timeseries])[0]
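        # mask the self-correlations on the diagonal (always 1.0) with NaN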
np.fill_diagonal(correlation_matrix, np.NaN)
# add the atlas labels to the matrix
atlas_lut_df =
|
pd.read_csv(self.inputs.atlas_lut, sep='\t')
|
pandas.read_csv
|
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
        names = (['foo'], ['bar'])
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
    def assert_multiindex_copied(self, copy, original):
        # Levels should be (at least shallow-)copied
        tm.assert_copy(copy.levels, original.levels)
        tm.assert_almost_equal(copy.labels, original.labels)
        # Labels are equal no matter which way they were copied
        tm.assert_almost_equal(copy.labels, original.labels)
        assert copy.labels is not original.labels
        # Names are equal no matter which way they were copied
        assert copy.names == original.names
        assert copy.names is not original.names
        # Sort order should be copied
        assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
            result = MultiIndex.from_product([[], lvl2, []], names=names)
            expected = MultiIndex(levels=[[], lvl2, []],
                                  labels=[[], [], []], names=names)
            tm.assert_index_equal(result, expected)
"""Test the two column transformer."""
import numpy as np
import numpy.testing as nt
import pandas as pd
import pandas.testing as pt
import pytest
import src.preprocessing as pp
@pytest.fixture
def data():
data = {
'f1': np.array([100, 110, 98, 1500, 30]),
'f2': 100 * np.ones((5, )),
'f3': np.zeros((5, )),
'target1': 100 + np.arange(5),
'target2': 200 + np.arange(5),
}
return pd.DataFrame(data)
def test_it_checks_init_params(data: pd.DataFrame):
def op(a, b): # pragma: no cover
return a
with pytest.raises(ValueError):
pp.TwoColumnsTransformer(['f1'], op, 'f11')
with pytest.raises(ValueError):
pp.TwoColumnsTransformer(['f1', 'f2', 'f3'], op, 'f11')
with pytest.raises(TypeError):
pp.TwoColumnsTransformer([0, 'age'], op, 'f11')
with pytest.raises(TypeError):
pp.TwoColumnsTransformer(['age', 0], op, 'f11')
with pytest.raises(ValueError):
pp.TwoColumnsTransformer(['age', ''], op, 'f11')
with pytest.raises(ValueError):
pp.TwoColumnsTransformer(['', 'income'], op, 'f11')
with pytest.raises(TypeError):
pp.TwoColumnsTransformer(['', 'income'], op, 12)
with pytest.raises(ValueError):
pp.TwoColumnsTransformer(['', 'income'], op, '')
with pytest.raises(TypeError):
pp.TwoColumnsTransformer(['age', 'income'], None, 'f11')
def test_it_checks_columns_in_df(data: pd.DataFrame):
def op(a, b): # pragma: no cover
return a
with pytest.raises(ValueError):
        transformer = pp.TwoColumnsTransformer(['f1', 'target3'], op, 'f11')
        transformer.fit(data)
    with pytest.raises(ValueError):
        transformer = pp.TwoColumnsTransformer(['target3', 'f1'], op, 'f11')
        transformer.fit(data)
def test_it_runs_safety_checks(data):
def safety_a(a):
if not np.all(a != 0):
raise ValueError("ValueError")
def safety_b(b):
if not np.all(b != 101):
raise ValueError("ValueError")
with pytest.raises(ValueError):
        transformer = pp.TwoColumnsTransformer(
            ['f3', 'target2'], lambda a, b: (b - a) / a, 'percent_change',
            (safety_a, safety_b))
        transformer.fit(data)
    with pytest.raises(ValueError):
        transformer = pp.TwoColumnsTransformer(
            ['f2', 'target1'], lambda a, b: (b - a) / a, 'percent_change',
            (safety_a, safety_b))
        transformer.fit(data)
def test_it_runs_with_no_safety_checks(data):
perc = pp.TwoColumnsTransformer(['f2', 'f1'], lambda a, b: (b - a) / a,
'percent_change')
result = perc.fit_transform(data)
expected = pd.DataFrame(
data=np.array([0.0, 0.1, -0.02, 14.0, -0.7]),
columns=['percent_change'])
pt.assert_frame_equal(result, expected)
def test_it_transforms_data(data: pd.DataFrame):
def safety_a(a):
return np.all(a != 0)
perc = pp.TwoColumnsTransformer(['f2', 'f1'], lambda a, b: (b - a) / a,
'percent_change', (safety_a, None))
result = perc.fit_transform(data)
expected = pd.DataFrame(
data=np.array([0.0, 0.1, -0.02, 14.0, -0.7]),
columns=['percent_change'])
    pt.assert_frame_equal(result, expected)
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import dask.dataframe as dd
import pandas as pd
from sqlalchemy import create_engine
from os import cpu_count
# In[ ]:
def to_sql(
table, name, db_path, force=False,
):
if_exists = 'replace' if force else 'fail'
table.to_sql(name, db_path, if_exists=if_exists)
if isinstance(table, dd.DataFrame):
divisions = pd.Series(table.divisions, name='divisions')
divisions.to_sql(f'{name}/divisions', db_path, if_exists=if_exists)
def from_sql(
name, db_path, index_col=None, dask=True,
):
if index_col is None:
index_col = 'index'
if dask:
divisions = pd.read_sql_table(f'{name}/divisions', db_path, index_col='index').divisions.tolist()
table = dd.read_sql_table(name, db_path, index_col=index_col, divisions=divisions)
else:
table = pd.read_sql_table(name, db_path, index_col=index_col)
return table
# In[ ]:
def table_to_sql(
table_name,
db_path,
index_col=None,
dask=True,
npartitions=-1,
table_path=None,
force_db_refresh=False,
**kwargs
):
engine = create_engine(db_path)
if npartitions is not None:
if npartitions <= 0:
npartitions = cpu_count()
if force_db_refresh or not engine.has_table(table_name):
if not table_path:
raise ValueError(f'table_path required in order to compute table {table_name}')
print(f'Creating db table: {table_name}')
if_exists = ('replace' if force_db_refresh else 'fail')
if dask:
csv = dd.read_csv(table_path, **kwargs)
if npartitions is not None:
csv = csv.repartition(npartitions=npartitions)
if index_col is None:
# Force Dask to get a cross-partition default integer autoinc index
csv = csv.reset_index().set_index('index')
else:
            csv = pd.read_csv(table_path, **kwargs)
import os
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0'
import numpy as np
import pandas as pd
from models import RnnVersion3
import gc
from keras.models import Model
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau,EarlyStopping,Callback
from tqdm import tqdm_notebook
user_app_actived = pd.read_csv('../../data/original_data/user_app_actived.csv',names=['uId', 'appId'])
usage_list = pd.read_csv('../../data/processed_data/usage_app_info.csv')  # resampled usage-app records
usage_appId = pd.read_csv('../../data/processed_data/usage_appId.csv')  # app dictionary of the usage table
appId = pd.read_csv('../../data/processed_data/appId.csv')  # app dictionary of the activation table
user_app_actived['app_list'] = user_app_actived.appId.str.split('#')
import ast
from tqdm import tqdm
usage_train = []
for idx in tqdm(usage_list.appId):
usage_train.append(ast.literal_eval(idx))
usage_list['app_list'] = usage_train
user_app_actived.drop('appId',axis=1,inplace=True)
usage_list.drop('appId',axis=1,inplace=True)
user_app_actived = pd.merge(user_app_actived, usage_list, how='left', on='uId')
result = []
for index,row in tqdm(user_app_actived.iterrows()):
try:
result.append(np.sort(list(set(row['app_list_x']) | set(row['app_list_y']))))
except:
result.append(row['app_list_x'])
user_app_actived['app_list'] = result
user_app_actived.drop(['app_list_x','app_list_y'],axis=1,inplace =True)
del usage_list
gc.collect()
x_train = pd.read_csv('../../data/original_data/age_train.csv',names=['uId','age_group'],dtype={'uId':np.int32, 'age_group':np.int8})
x_test = pd.read_csv('../../data/original_data/age_test.csv',names=['uId'],dtype={'uId':np.int32})
x_train = pd.merge(x_train, user_app_actived, how='left', on='uId')
x_test = pd.merge(x_test, user_app_actived, how='left', on='uId')
y_train = x_train.age_group - 1
x_train = x_train.drop('age_group',axis=1)
del user_app_actived
gc.collect()
usage_appId = pd.read_csv('../../data/processed_data/usage_appId_top_num100000.csv')
usage_appId = usage_appId[-20000:]
usage_appId['id'] = np.arange(0,20000)
all_appid = list(set(appId.appId.tolist() + usage_appId.appId.tolist()))
app_dict = dict(zip(all_appid,np.arange(len(all_appid))))
app_list = [[app_dict[x] for x in apps if x in app_dict] for apps in x_train.app_list]
app_test = [[app_dict[x] for x in apps if x in app_dict] for apps in x_test.app_list]
from keras.preprocessing import sequence
app_list = sequence.pad_sequences(app_list, maxlen=170)
app_test = sequence.pad_sequences(app_test, maxlen=170)
x_train.drop('app_list',axis=1,inplace=True)
x_test.drop('app_list',axis=1,inplace=True)
gc.collect()
class _Data_Preprocess:
def __init__(self):
self.int8_max = np.iinfo(np.int8).max
self.int8_min = np.iinfo(np.int8).min
self.int16_max = np.iinfo(np.int16).max
self.int16_min = np.iinfo(np.int16).min
self.int32_max = np.iinfo(np.int32).max
self.int32_min = np.iinfo(np.int32).min
self.int64_max = np.iinfo(np.int64).max
self.int64_min = np.iinfo(np.int64).min
self.float16_max = np.finfo(np.float16).max
self.float16_min = np.finfo(np.float16).min
self.float32_max = np.finfo(np.float32).max
self.float32_min = np.finfo(np.float32).min
self.float64_max = np.finfo(np.float64).max
self.float64_min = np.finfo(np.float64).min
'''
function: _get_type(self,min_val, max_val, types)
get the correct types that our columns can trans to
'''
def _get_type(self, min_val, max_val, types):
if types == 'int':
if max_val <= self.int8_max and min_val >= self.int8_min:
return np.int8
            elif max_val <= self.int16_max and min_val >= self.int16_min:
return np.int16
elif max_val <= self.int32_max and min_val >= self.int32_min:
return np.int32
return None
elif types == 'float':
if max_val <= self.float16_max and min_val >= self.float16_min:
return np.float16
if max_val <= self.float32_max and min_val >= self.float32_min:
return np.float32
if max_val <= self.float64_max and min_val >= self.float64_min:
return np.float64
return None
'''
function: _memory_process(self,df)
column data types trans, to save more memory
'''
def _memory_process(self, df):
init_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
print('Original data occupies {} GB memory.'.format(init_memory))
df_cols = df.columns
for col in tqdm_notebook(df_cols):
try:
if 'float' in str(df[col].dtypes):
max_val = df[col].max()
min_val = df[col].min()
trans_types = self._get_type(min_val, max_val, 'float')
if trans_types is not None:
df[col] = df[col].astype(trans_types)
elif 'int' in str(df[col].dtypes):
max_val = df[col].max()
min_val = df[col].min()
trans_types = self._get_type(min_val, max_val, 'int')
if trans_types is not None:
df[col] = df[col].astype(trans_types)
except:
                print('Cannot process column {}.'.format(col))
afterprocess_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
print('After processing, the data occupies {} GB memory.'.format(afterprocess_memory))
return df
memory_preprocess = _Data_Preprocess()
train = pd.read_csv('../../data/features/base_train.csv')
test = pd.read_csv('../../data/features/base_test.csv')
train=memory_preprocess._memory_process(train)
test=memory_preprocess._memory_process(test)
print(test.info())
gc.collect()
actived_features_all = pd.read_csv('../../data/features/actived_features_all.csv')
actived_features_all=memory_preprocess._memory_process(actived_features_all)
train = pd.merge(train, actived_features_all, how='left', on='uId').fillna(0)
test = pd.merge(test, actived_features_all, how='left', on='uId').fillna(0)
del actived_features_all
gc.collect()
act_use_rnn_hide_train=pd.read_csv('../../data/features/act_use_rnn_hide_train.csv')
act_use_rnn_hide_train=memory_preprocess._memory_process(act_use_rnn_hide_train)
act_use_rnn_hide_train.rename(columns={'Unnamed: 0': 'uId'}, inplace=True)
train = pd.merge(train, act_use_rnn_hide_train, how='left', on='uId').fillna(0)
del act_use_rnn_hide_train
act_use_rnn_hide_test=pd.read_csv('../../data/features/act_use_rnn_hide_test.csv')
act_use_rnn_hide_test=memory_preprocess._memory_process(act_use_rnn_hide_test)
act_use_rnn_hide_test.rename(columns={'Unnamed: 0': 'uId'}, inplace=True)
test = pd.merge(test, act_use_rnn_hide_test, how='left', on='uId').fillna(0)
print(test.info())
del act_use_rnn_hide_test
gc.collect()
train_uId = x_train.uId.tolist()
test_uId = x_test.uId.tolist()
test.index = test.uId.tolist()
train.index = train.uId.tolist()
test = test.loc[test_uId,:]
train = train.loc[train_uId,:]
train.drop(['uId','age_group'],axis=1,inplace=True)
test.drop('uId',axis=1,inplace=True)
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
from sklearn.preprocessing import StandardScaler,MinMaxScaler
train = train.replace([np.inf, -np.inf], np.nan).fillna(0)
test = test.replace([np.inf, -np.inf], np.nan).fillna(0)
scaler = MinMaxScaler()
scaler.fit(pd.concat([train,test],axis=0))
train = scaler.transform(train)
test = scaler.transform(test)
train = memory_preprocess._memory_process(pd.DataFrame(train))
test = memory_preprocess._memory_process(pd.DataFrame(test))
gc.collect()
train = train.values
test = test.values
from keras.utils.np_utils import to_categorical
y_train = to_categorical(y_train, num_classes=None)
from sklearn.model_selection import train_test_split, StratifiedKFold
kfold = StratifiedKFold(n_splits=5, random_state=10, shuffle=False)
y_testb = np.zeros((x_test.shape[0],6))
y_valb = np.zeros((x_train.shape[0],6))
for i, (train_index, valid_index) in enumerate(kfold.split(app_list, np.argmax(y_train,axis=1))):
X_train1, X_val1, X_train2, X_val2,Y_train, Y_val = app_list[train_index],app_list[valid_index], train[train_index], train[valid_index], y_train[train_index], y_train[valid_index]
filepath="weights_best5.h5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=2, save_best_only=True, mode='min')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.6, patience=1, min_lr=0.0001, verbose=2)
earlystopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=2, verbose=2, mode='auto')
callbacks = [checkpoint, reduce_lr]
model = RnnVersion3()
model.fit([X_train1,X_train2], Y_train, batch_size=128, epochs=4, validation_data=([X_val1,X_val2], Y_val), verbose=1, callbacks=callbacks,
)
model.load_weights(filepath)
y_valb[valid_index] = model.predict([X_val1,X_val2], batch_size=128, verbose=1)
y_testb += np.array(model.predict([app_test,test], batch_size=128, verbose=1))/5
y_valb = pd.DataFrame(y_valb, index=train_uId)
# -*- coding: utf-8 -*-
"""
Created on Sun May 30 14:37:43 2021
@author: Three_Gold
"""
"""
1、对提供的图书大数据做初步探索性分析。分析每一张表的数据结构。
2、对给定数据做数据预处理(数据清洗),去重,去空。
3、按照分配的院系保留自己需要的数据(教师的和学生的都要),这部分数据为处理好的数据。
4、将处理好的数据保存,文件夹名称为预处理后数据。
"""
import pandas as pd
import time
import os
import jieba
import wordcloud
path = os.getcwd()  # get the project root directory
path = path.replace('\\', '/')  # use forward slashes in the root path
Path = path + '/代码/12暖暖小组01号彭鑫项目/预处理后数据/'  # root directory for the preprocessed data
# Data cleaning: drop duplicates and empty values
A1_UserID = pd.read_excel(path + '/代码/12暖暖小组01号彭鑫项目/原始数据/读者信息.xlsx')
A1_UserID = A1_UserID.dropna()  # drop empty values
A1_UserID = A1_UserID.drop_duplicates()  # drop duplicates
A1_UserID = A1_UserID.reset_index(drop=True)  # reset the index
A1_UserID.to_excel(path + '/代码/12暖暖小组01号彭鑫项目/清洗后数据/读者信息.xlsx')  # save the cleaned reader info
book_list = pd.read_excel(path + '/代码/12暖暖小组01号彭鑫项目/原始数据/图书目录.xlsx')  # read the raw book catalogue
book_list = book_list.dropna()  # drop empty values
book_list = book_list.drop_duplicates()  # drop duplicates
book_list = book_list.reset_index(drop=True)  # reset the index
book_list.to_excel(path + '/代码/12暖暖小组01号彭鑫项目/清洗后数据/图书目录.xlsx')  # save the cleaned catalogue
def bookyearsdata():  # deduplicate the loan records, drop empty values and save them
    Year_All = ['2014', '2015', '2016', '2017']
    for year in Year_All:
        address = path + '/代码/12暖暖小组01号彭鑫项目/原始数据/图书借还' + year + '.xlsx'  # path of the raw loan file
        address_last = path + '/代码/12暖暖小组01号彭鑫项目/清洗后数据/图书借还' + year + '.xlsx'  # path where the cleaned file is saved
        book = pd.read_excel(address)
        book = book.dropna()  # drop empty values
        book = book.drop_duplicates()  # drop duplicates
        book = book.reset_index(drop=True)  # reset the index
        book.to_excel(address_last)  # save the cleaned loan records
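# Step 3 in the docstring at the top of this file (keep only the records of the
# assigned department, for both teachers and students) is not shown in this excerpt.
# The helper below is only a sketch of such a filter; the department column name
# '所属院系' is an assumption about the reader-info table, not taken from the data.
def keep_department(readers, department):
    """Keep only readers (teachers and students alike) belonging to one department."""
    return readers[readers['所属院系'] == department].reset_index(drop=True)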
import pandas as pd
import numpy as np
from .icd_parsing_functions import get_code_categories, charlson_calc
def preprocess_all(static_vars, dynamic_vars, outcome_vars, input_vars):
print(dynamic_vars.head())
top_k_feats = list(dynamic_vars['label'].value_counts()[:5].index)
id_vars = ['subject_id','hadm_id','stay_id']
dynamic_regular = get_regular_timeseries(dynamic_vars, id_vars, top_k_feats)
print(dynamic_regular.head())
dynamic_regular_imputed = impute_dynamic_data(dynamic_regular,id_vars, top_k_feats)
print(dynamic_regular_imputed.head())
static_vars_clean = preprocess_static_vars(static_vars)
return static_vars_clean, dynamic_regular_imputed, outcome_vars, input_vars
def preprocess_static_vars(static_vars_df: pd.DataFrame) -> pd.DataFrame:
static_vars_clean = static_vars_df.copy()
static_vars_clean = pd.get_dummies(static_vars_clean)
return static_vars_clean
def get_regular_timeseries(timestamp_timeseries: pd.DataFrame, id_vars, feature_names) -> pd.DataFrame:
"""converts a timeseries with each variable recorded as value - datetime
into hourly (or other interval)"""
temp = timestamp_timeseries
temp['time_in'] = pd.to_datetime(temp['charttime']) - pd.to_datetime(temp['intime'])
temp = temp.drop(['intime','outtime','charttime','itemid','valueuom'], axis=1) #TODO: check if valueuom can be dropped
temp = temp.set_index(['subject_id','hadm_id','stay_id','time_in'])
temp = temp.loc[temp['label'].isin(feature_names),:]
    b = pd.pivot(temp, columns=['label'])
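# get_regular_timeseries() is truncated above; per its docstring, the pivoted frame
# is then put onto a regular (e.g. hourly) grid. The helper below is only a sketch
# of that step for a single stay; the 1-hour interval and the assumption that the
# frame is indexed by the 'time_in' timedelta are illustrative, not the original code.
def _hourly_grid_sketch(one_stay):
    """Resample a frame indexed by time-since-admission onto hourly bins."""
    return one_stay.resample('1H').mean()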
"""
Module for data exploration for ARIMA modeling.
This module contains the back-end exploration of river run flow rate
data and exogenous predictors to determine the best way to create a
time-series model of the data. Note that since this module was only used
once (i.e. is not called in order to create ongoing predictions),
it is not accompanied by any unit testing.
Functions:
daily_avg: takes time series with measurements on different
timeframes and creates a dataframe with daily averages for flow
rate and exogenous predictors
test_stationarity: implements Dickey-Fuller test and rolling average
plots to check for stationarity of the time series
plot_autocorrs: creates plots of autocorrelation function and partial
autocorrelation function to help determine p and q parameters for ARIMA
model
test_model: runs stationarity tests and acf/pcf tests and then
creates ARIMA model for one run and plots results
"""
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import arma_order_select_ic
from riverrunner.repository import Repository
REPO = Repository()
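# The module docstring above names a `test_stationarity` helper that applies the
# Dickey-Fuller test and rolling-average plots. Its implementation is not part of
# this excerpt; the function below is only a minimal sketch of that idea (the name,
# signature and 12-observation window are assumptions, not the original code).
def _stationarity_sketch(series, window=12):
    """Plot rolling statistics and print Dickey-Fuller results for a series."""
    rolling_mean = series.rolling(window).mean()
    rolling_std = series.rolling(window).std()
    plt.plot(series, label='observed')
    plt.plot(rolling_mean, label='rolling mean')
    plt.plot(rolling_std, label='rolling std')
    plt.legend()
    adf_stat, p_value, _, _, critical_values, _ = adfuller(series.dropna())
    print('ADF statistic: {:.4f}, p-value: {:.4f}'.format(adf_stat, p_value))
    print('Critical values:', critical_values)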
def daily_avg(time_series):
"""Creates dataframe needed for modelling
Takes time series with measurements on different timeframes and creates a
dataframe with daily averages for flow rate and exogenous predictors.
Args:
time_series: dataframe with metrics for one run_id, assumes output
from get_measurements function
Returns:
DataFrame: containing daily measurements
"""
precip = time_series[time_series.metric_id == '00003']
    precip['date_time'] = pd.to_datetime(precip['date_time'], utc=True)
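# daily_avg() is truncated above. Its docstring says the measurements are collapsed
# to daily averages; the helper below is only a sketch of that resampling step (the
# 'value' column name and the unstacking by metric are assumptions about the schema,
# not code from the original module).
def _daily_mean_sketch(measurements):
    """Average a metric_id/date_time/value frame onto calendar days."""
    measurements = measurements.copy()
    measurements['date_time'] = pd.to_datetime(measurements['date_time'], utc=True)
    daily = (measurements
             .set_index('date_time')
             .groupby('metric_id')['value']
             .resample('D')
             .mean()
             .unstack(level='metric_id'))
    return daily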
# coding: utf-8
# Scrapes wikipedia page for TDP of different cpus
import pandas as pd
url = "https://en.wikipedia.org/wiki/List_of_CPU_power_dissipation_figures"
df=pd.read_html(url)
power_dfs = [x for x in df if "Thermal Design Power" in x.columns or "TDP" in x.columns or "Power" in x.columns]
for x in power_dfs:
x.rename(columns = {'Thermal Design Power':'TDP'}, inplace = True)
x.rename(columns = {'Power':'TDP'}, inplace = True)
power_dfs = [x.filter(["Model", "TDP"]) for x in power_dfs]
pd.concat(power_dfs)
from datetime import datetime, timedelta
from textwrap import dedent
from airflow import DAG
from airflow.operators.bash import BashOperator
from airflow.decorators import dag, task
from airflow.models.baseoperator import chain
# Import the libraries used for dataframe processing
import pandas as pd
import numpy as np
import zipfile
import requests
from io import BytesIO
import os
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold, cross_val_predict, cross_val_score
from sklearn.svm import SVC
from sklearn import tree
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn import metrics
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from joblib import dump, load
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'email': ['<EMAIL>'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5)
}
@dag(schedule_interval=None, start_date=datetime(2021, 1, 1), catchup=False, tags=['example'])
def Housing():
"""
    Machine-learning flow for the housing dataset
"""
@task()
def start():
print("iniciando...")
@task()
def criar_diretorio():
os.makedirs('data/housing', exist_ok=True)
@task()
def download_housing_file():
        # download the zip archive containing the csv
url = "https://github.com/ozeiasgodoy/notebooks/blob/main/dados/housing.zip?raw=true"
filebytes_housing = BytesIO(
requests.get(url).content
)
with open("data/housing/housing.zip", "wb") as outfile:
outfile.write(filebytes_housing.getbuffer())
@task()
def extract_housing_file():
myzip = zipfile.ZipFile("data/housing/housing.zip")
myzip.extractall('data/housing')
@task()
def criar_novos_campos():
        # load the extracted file into a dataframe
        housing = pd.read_csv('data/housing/housing.csv')
        # rooms per household
        housing['rooms_per_household'] = housing['total_rooms']/housing['households']
        # bedrooms per room
        housing['bedrooms_per_room'] = housing['total_bedrooms']/housing['total_rooms']
        # population per household
housing['population_per_household'] = housing['population']/housing['households']
housing.to_csv("data/housing/housing_campos_novos.csv")
@task()
def tratar_campos_nulos():
        # load the csv produced by the previous step into a dataframe
housing = pd.read_csv('data/housing/housing_campos_novos.csv')
housing['total_bedrooms'] = housing['total_bedrooms'].fillna(housing['total_bedrooms'].mean())
housing['bedrooms_per_room'] =housing['bedrooms_per_room'].fillna(housing['bedrooms_per_room'].mean())
housing.to_csv("data/housing/housing_sem_campos_nulos.csv")
@task()
def aplicar_one_hot_encoding():
        # load the csv produced by the previous step into a dataframe
housing = pd.read_csv('data/housing/housing_sem_campos_nulos.csv')
housing = pd.get_dummies(housing, columns=['ocean_proximity'])
housing.to_csv("data/housing/housing_hot_encoding.csv")
@task()
def normalizar_dados():
        # load the csv produced by the previous step into a dataframe
housing = pd.read_csv('data/housing/housing_hot_encoding.csv')
min_max_scaler = MinMaxScaler()
min_max_scaler.fit(housing)
housing[['longitude', 'latitude', 'housing_median_age', 'total_rooms', 'total_bedrooms', 'population',
'households', 'median_income', 'rooms_per_household','bedrooms_per_room',
'population_per_household']] = min_max_scaler.fit_transform(
housing[['longitude', 'latitude', 'housing_median_age', 'total_rooms', 'total_bedrooms', 'population',
'households', 'median_income', 'rooms_per_household','bedrooms_per_room','population_per_household']]
)
housing.to_csv("data/housing/housing_normalizado.csv")
@task()
def dividir_dados_treino_teste():
housing = pd.read_csv('data/housing/housing_normalizado.csv')
housing_train, housing_test = train_test_split(housing, test_size=0.3, random_state=42)
housing_train.to_csv("data/housing/housing_train.csv")
housing_test.to_csv("data/housing/housing_test.csv")
@task()
def treinar_LinearRegression():
housing = pd.read_csv('data/housing/housing_train.csv')
X_train = housing.drop(["median_house_value"], axis=1)
Y_train = housing["median_house_value"]
        # regression model
lr = LinearRegression()
lr.fit(X_train, Y_train)
dump(lr, "data/housing/LinearRegression_housing.joblib")
@task()
def treinar_DecisionTreeRegressor():
housing = pd.read_csv('data/housing/housing_train.csv')
X_train = housing.drop(["median_house_value"], axis=1)
Y_train = housing["median_house_value"]
        # regression model
        lr = DecisionTreeRegressor()
        lr.fit(X_train, Y_train)
        dump(lr, "data/housing/DecisionTreeRegressor_housing.joblib")
@task()
def treinar_RandomForestRegressor():
        housing = pd.read_csv('data/housing/housing_train.csv')
        X_train = housing.drop(["median_house_value"], axis=1)
        Y_train = housing["median_house_value"]
        # regression model
        rf = RandomForestRegressor()
        rf.fit(X_train, Y_train)
        dump(rf, "data/housing/RandomForestRegressor_housing.joblib")
import numpy as np
import os
from scipy.spatial.distance import cdist
import time
import shutil
import matplotlib.pyplot as plt
import pandas as pd
from keras.models import Model
from keras.layers import Dense
from keras.utils.np_utils import *
from keras.applications.resnet import ResNet50
from keras.applications.densenet import DenseNet121,DenseNet169,DenseNet201
from keras.applications.resnet import ResNet101, ResNet152
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from keras.applications.resnet import preprocess_input
from keras.applications.vgg16 import preprocess_input as preprocess_input_vgg
from tensorflow.keras.optimizers import SGD
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from sklearn.metrics import accuracy_score, precision_score,classification_report, confusion_matrix
import seaborn as sn
import cv2
def plot_pred_prob(result, result_dir, error_id):
fig = plt.figure()
fig.set_size_inches(5, 4)
fig_6 = fig.add_subplot('111')
fig.set_tight_layout(True)
for n in range(len(result['logits'])):
if n in error_id:
print(n)
fig_6.cla()
fig.set_tight_layout(True)
logits = result['logits'][n]
fig_6.set_xlim(0, 1.1)
plane_names = ['bladder', 'bowel', 'gallbladder', 'kidney', 'liver', 'spleen']
colors = ['red','orange', 'gold', 'green', 'blue', 'purple']
fig_6.set_xlabel("Predicted Probability", fontsize=20)
fig_6.set_yticks(range(len(plane_names)))
fig_6.tick_params(axis='y', labelsize=18)
fig_6.tick_params(axis='x', labelsize=16)
fig_6.set_yticklabels(plane_names)
fig_6.set_xticks(np.arange(0, 1.1, 0.2))
fig_6.invert_yaxis()
fig_6.barh(range(len(plane_names)), logits, color=colors)
step = n
img_name = "err_" + str(step) + ".jpg"
result_dir_new = os.path.join(result_dir, "recognition_prob")
if not os.path.exists(result_dir_new):
os.makedirs(result_dir_new)
img = os.path.join(result_dir_new, img_name)
fig.savefig(img, quality=100, bbox_inches='tight')
fig1 = plt.figure()
fig1.set_size_inches(5, 4)
    fig_0 = fig1.add_subplot('111')
fig1.set_tight_layout(True)
for n in range(len(result['logits'])):
if n in error_id:
print(n)
fig_0.cla()
fig1.set_tight_layout(True)
# img = (result['img'][n]).astype('uint8')
img = result['img'][n]
img_name = "err_img_" + str(n) + os.path.basename(img)
img = cv2.imread(img)
plt.imshow(img, cmap='gray')
plt.axis('off')
result_dir_new = os.path.join(result_dir, "recognition_prob")
if not os.path.exists(result_dir_new):
os.makedirs(result_dir_new)
img = os.path.join(result_dir_new, img_name)
fig1.savefig(img, quality=100, bbox_inches='tight')
def recognize_organs_fc(nn_model):
error_id = []
if nn_model == 'resnet50':
base_model = ResNet50(include_top=False, weights=None, pooling='avg')
elif nn_model == 'resnet101':
base_model = ResNet101(include_top=False, weights=None, pooling='avg')
elif nn_model == 'resnet152':
base_model = ResNet152(include_top=False, weights=None, pooling='avg')
elif nn_model == 'densenet121':
base_model = DenseNet121(include_top=False, weights=None, pooling='avg')
elif nn_model == 'densenet169':
base_model = DenseNet169(include_top=False, weights=None, pooling='avg')
elif nn_model == 'densenet201':
base_model = DenseNet201(include_top=False, weights=None, pooling='avg')
else:
raise NotImplementedError("The NN model is not implemented!")
predictions = Dense(6, activation='softmax')(base_model.output)
model = Model(inputs=base_model.input, outputs=predictions)
weight_file = './finetune/' + nn_model + '/finetune_weights_50_epoch.h5'
assert os.path.exists(weight_file) is True, "Weight path is empty"
model.load_weights(weight_file, by_name=False)
# preprocess test data
test_dir = './dataset/img/test/'
test_imgs = []
test_imgs_original = []
test_labels = []
test_labels_int = []
test_img_names = os.listdir(test_dir)
    start = time.perf_counter()
for i in range(len(test_img_names)):
img_path = os.path.join(test_dir, test_img_names[i])
test_imgs_original.append(img_path)
test_img = load_img(img_path)
test_img = img_to_array(test_img)
test_img = preprocess_input(test_img)
test_imgs.append(test_img)
test_class = test_img_names[i].split('-')[0]
test_labels.append(test_class)
test_imgs = np.array(test_imgs)
# encode the string label to integer
organs = ['bladder', 'bowel', 'gallbladder', 'kidney', 'liver', 'spleen']
mapping = {}
for i in range(len(organs)):
mapping[organs[i]] = i
for i in range(len(test_labels)):
test_labels_int.append(mapping[test_labels[i]])
# compile model
learning_rate = 0.01
decay_rate = 0
momentum = 0.9
sgd = SGD(lr=learning_rate, momentum=momentum, decay=decay_rate, nesterov=False)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['acc'])
# predict with model
test_logits = model.predict(test_imgs)
test_predictions = np.argmax(test_logits, axis=1)
num_acc = 0
    end = time.perf_counter()
total_time = end - start
print("Average inference time for one image: {}".format(total_time/len(test_imgs)))
for i in range(len(test_imgs)):
# print("true: {} predict: {}".format(test_labels_int[i], test_predictions[i]))
if test_predictions[i] == test_labels_int[i]:
num_acc += 1
else:
error_id.append(i)
result_dict = {"img": test_imgs_original,
"logits": test_logits}
plot_pred_prob(result_dict, "fc_errors", error_id)
acc = num_acc / len(test_imgs)
print("Model: {}, acc: {:.4f}, {}/{} correct.".format(nn_model, acc, num_acc, len(test_imgs)))
# scores = model.evaluate(test_imgs, test_labels, verbose=0)
# print("Model: {} Test acc: {:.4f}".format(nn_model, scores[1]))
# print(classification_report(test_labels_int, test_predictions, target_names=organs, digits=6))
confusion = confusion_matrix(test_labels_int, test_predictions)
df_cm = pd.DataFrame(confusion, index=['bladder', 'bowel', 'gallbladder', 'kidney', 'liver', 'spleen'],
columns=['bladder', 'bowel', 'gallbladder', 'kidney', 'liver', 'spleen'])
plt.figure(figsize=(5, 4))
plt.xlabel("Predicted label")
plt.ylabel("True label")
cmap = sn.cm.rocket_r
# cmap = plt.cm.Blues
ax = sn.heatmap(df_cm, annot=True, fmt='.20g', cmap=cmap)
ax.set_xlabel("Predicted class", fontsize=12)
ax.set_ylabel("True class", fontsize=12)
result_dir = "confusion/{}".format(nn_model)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
plt.savefig(
os.path.join(result_dir, "confusion_matrix_{}_fc.jpg".format(nn_model)),
quality=100,
bbox_inches='tight')
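# recognize_organs() below classifies each test image with a k-NN vote over saved
# feature vectors. Because the loop there mixes bookkeeping with the vote, the helper
# below restates the core idea in compact form; it is a sketch for reference only
# (the names and the assumption that feature vectors are 2-D (1, d) arrays are
# illustrative, not part of the original pipeline).
def _knn_vote_sketch(test_vec, database_vecs, database_labels, k=3, metric='cityblock'):
    """Return the majority label among the k nearest database feature vectors."""
    dists = cdist(test_vec, database_vecs, metric)[0]  # distances to every database vector
    nearest = np.argsort(dists)[:k]                    # indices of the k closest vectors
    votes = [database_labels[j] for j in nearest]
    return max(set(votes), key=votes.count)            # majority vote (ties resolved arbitrarily)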
def recognize_organs(nn_model, pca=False):
'''
Use the k-NN method to classify the abdominal organ in the image
by comparing distances between features of the test images
and features of images in the training set
:return:
'''
if pca:
nn_model = nn_model + '_pca'
testlist = []
databaselist = []
test_class_list = []
data_class_list = []
test_dir = './dataset/feature_' + nn_model + '/test'
train_dir = './dataset/feature_' + nn_model + '/train'
result_dir = './result/'
test_imgs = os.listdir(test_dir)
train_imgs = os.listdir(train_dir)
# read image feature vectors and the labels
for i in range(len(test_imgs)):
test_img = os.path.join(test_dir, test_imgs[i])
testlist.append(test_img)
test_class = test_imgs[i].split('-')[0]
test_class_list.append(test_class)
for i in range(len(train_imgs)):
train_img = os.path.join(train_dir,train_imgs[i])
databaselist.append(train_img)
train_class = train_imgs[i].split('-')[0]
data_class_list.append(train_class)
# k values
k_list = [1,3,5,7,9]
distance_list = ['euclidean', 'cityblock', 'canberra', 'cosine']
# k_list = [3]
# distance_list = ['cityblock']
correct_rate_list = dict()
for dist_category in distance_list:
correct_rate_list[dist_category] = []
for k in k_list:
num_test = len(testlist)
num_database = len(databaselist)
testlist_new = []
test_class_list_new = []
pred = []
pred_img = []
pred_num = []
dists = np.zeros((num_test, num_database))
dist_pred = []
            start = time.perf_counter()
for i in range(num_test):
# print('image %d: %s' % (i, testlist[i]))
kclose_list = []
kclose_img_list = []
kclose_dist_list = [] # k closest distances
for j in range(num_database):
test_vec = np.load(testlist[i])
database_vec = np.load(databaselist[j])
dists[i][j] = cdist(test_vec, database_vec, dist_category)
# find the k nearest neighbors
dist_k_min = np.argsort(dists[i])[:k]
pred_num.append(i)
testlist_new.append(testlist[i])
test_class_list_new.append(test_class_list[i])
# k-NN majority vote
for m in range(k):
kclose_list.append(data_class_list[dist_k_min[m]])
kclose_img_list.append(databaselist[dist_k_min[m]])
kclose_dist_list.append(dists[i][dist_k_min[m]])
# print('For %d ,the %d th closest img is %s' % (i, m, kclose_img_list[-1]))
# print('with the %d th smallest distance: %f' % (m, kclose_dist_list[-1]))
# k-NN majority vote
for n in range(len(kclose_list)):
old = kclose_list.count(kclose_list[n])
num = max(kclose_list.count(m) for m in kclose_list)
if (num == old):
pred.append(kclose_list[n])
pred_img.append(kclose_img_list[n])
dist_pred.append(kclose_dist_list[n])
# print('%d---true: %s ,pred: %s' % (i, test_class_list[i], pred[-1]))
break
# calculate the accuracy
correct = 0
num_test_new = len(testlist_new)
error_test_list = []
error_pred_img = []
error_num = []
error_dist = []
for i in range(num_test_new):
if (pred[i] == test_class_list_new[i]):
correct += 1
else:
# bad case
error_num.append(pred_num[i])
error_test_list.append(testlist_new[i])
error_pred_img.append(pred_img[i])
error_dist.append(dist_pred[i])
for i in range(len(error_test_list)):
print('%d is incorrect, true img: %s , pred img: %s, distance: %f' % (
error_num[i], error_test_list[i], error_pred_img[i], error_dist[i]))
# copy wrong images to /result
for file in error_test_list:
file_name = os.path.split(file)[1]
file_without_ext = os.path.splitext(file_name)[0]
file_without_ext = os.path.splitext(file_without_ext)[0]
# print(file_without_ext)
raw_img_name = file_without_ext + '.png'
raw_img = os.path.join('./dataset/img/test', raw_img_name)
error_savepath = os.path.join('./result/k_{}/error/'.format(k))
if not os.path.exists(error_savepath):
os.makedirs(error_savepath)
shutil.copy(raw_img, error_savepath)
correct_rate = correct / num_test_new
correct_rate_list[dist_category].append(correct_rate)
print("Current dist:", dist_category)
print("Current feature extractor:", nn_model)
print('k = %d, The correct rate is %.2f%%' % (k, correct_rate * 100))
            end = time.perf_counter()
total_time = end - start
print("Average inference time for one image: {:.2f}".format(total_time / num_test_new))
# find the best configuration of k, distance metric for current feature extractor
max_acc = 0
best_k = 0
best_dist = None
for dist in correct_rate_list:
for k in range(len(correct_rate_list[dist])):
if correct_rate_list[dist][k] > max_acc:
max_acc = correct_rate_list[dist][k]
best_k = k_list[k]
best_dist = dist
print("best acc: ",max_acc,"best distance metric: ",best_dist,"best k: ", best_k)
#######################################
# use best k and dist to calculate the confusion matrix
num_test = len(testlist)
num_database = len(databaselist)
testlist_new = []
test_class_list_new = []
pred = []
pred_img = []
pred_num = []
dists = np.zeros((num_test, num_database))
dist_pred = []
for i in range(num_test):
# print('image %d: %s' % (i, testlist[i]))
kclose_list = []
kclose_img_list = []
kclose_dist_list = [] # k closest distances
for j in range(num_database):
test_vec = np.load(testlist[i])
database_vec = np.load(databaselist[j])
dists[i][j] = cdist(test_vec, database_vec, best_dist)
# find the k nearest neighbors
dist_k_min = np.argsort(dists[i])[:best_k]
pred_num.append(i)
testlist_new.append(testlist[i])
test_class_list_new.append(test_class_list[i])
# k-NN majority vote
for m in range(best_k):
kclose_list.append(data_class_list[dist_k_min[m]])
kclose_img_list.append(databaselist[dist_k_min[m]])
kclose_dist_list.append(dists[i][dist_k_min[m]])
print('For %d test img: %s, the %d th closest img is %s' % (i, testlist_new[i],m, kclose_img_list[-1]))
# print('with the %d th smallest distance: %f' % (m, kclose_dist_list[-1]))
# k-NN majority vote
for n in range(len(kclose_list)):
old = kclose_list.count(kclose_list[n])
num = max(kclose_list.count(m) for m in kclose_list)
if (num == old):
pred.append(kclose_list[n])
pred_img.append(kclose_img_list[n])
dist_pred.append(kclose_dist_list[n])
# print('%d---true: %s ,pred: %s' % (i, test_class_list[i], pred[-1]))
break
confusion = confusion_matrix(test_class_list_new, pred)
df_cm = pd.DataFrame(confusion, index=['bladder', 'bowel', 'gallbladder', 'kidney', 'liver', 'spleen'],
columns=['bladder', 'bowel', 'gallbladder', 'kidney', 'liver', 'spleen'])
plt.figure(figsize=(5, 4))
plt.xlabel("Predicted label")
plt.ylabel("True label")
cmap = sn.cm.rocket_r
# cmap = plt.cm.Blues
ax = sn.heatmap(df_cm, annot=True, fmt='.20g', cmap=cmap)
ax.set_xlabel("Predicted class", fontsize=12)
ax.set_ylabel("True class", fontsize=12)
result_dir = "confusion/{}".format(nn_model)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
whether_pca = 'ft_pca' if pca else 'ft'
plt.savefig(
os.path.join(result_dir, "confusion_matrix_{}_{}_k_{}_{}.jpg".format(nn_model, whether_pca, best_k, best_dist)),
quality=100,
bbox_inches='tight')
############################################
acc_csv_file = os.path.join(result_dir, nn_model+"_kNN_accuracy.csv")
    save = pd.DataFrame(correct_rate_list)  # one row per k, one column per distance metric
    save.to_csv(acc_csv_file, index=False)
import os
import numpy as np
import holoviews as hv
import pandas as pd
import logging
from bokeh.models import HoverTool
import holoviews as hv
import datashader as ds
from holoviews.operation.datashader import aggregate, datashade, dynspread
import colorcet as cc
import param
import parambokeh
from lsst.pipe.tasks.functors import (Mag, CustomFunctor, DeconvolvedMoments,
StarGalaxyLabeller, RAColumn, DecColumn,
Column, SdssTraceSize, PsfSdssTraceSizeDiff,
HsmTraceSize, PsfHsmTraceSizeDiff, CompositeFunctor)
default_xFuncs = {'base_PsfFlux' : Mag('base_PsfFlux'),
'modelfit_CModel' : Mag('modelfit_CModel')}
default_yFuncs = {'modelfit_CModel - base_PsfFlux' : CustomFunctor('mag(modelfit_CModel) - mag(base_PsfFlux)'),
'Deconvolved Moments' : DeconvolvedMoments(),
'Footprint NPix' : Column('base_Footprint_nPix'),
'ext_photometryKron_KronFlux - base_PsfFlux' : \
CustomFunctor('mag(ext_photometryKron_KronFlux) - mag(base_PsfFlux)'),
'base_GaussianFlux - base_PsfFlux' : CustomFunctor('mag(base_GaussianFlux) - mag(base_PsfFlux)'),
'SDSS Trace Size' : SdssTraceSize(),
'PSF - SDSS Trace Size' : PsfSdssTraceSizeDiff(),
'HSM Trace Size' : HsmTraceSize(),
'PSF - HSM Trace Size': PsfHsmTraceSizeDiff()}
default_labellers = {'default':StarGalaxyLabeller()}
def getFunc(funcName):
if funcName in default_xFuncs:
return default_xFuncs[funcName]
elif funcName in default_yFuncs:
return default_yFuncs[funcName]
else:
return CustomFunctor(funcName)
def getLabeller(labellerName):
return default_labellers[labellerName]
def write_selected(explorer, filename):
print(explorer._selected.head())
def get_default_range(x, y):
x = pd.Series(x).dropna()
y = pd.Series(y).dropna()
xMed = np.median(x)
yMed = np.median(y)
xMAD = np.median(np.absolute(x - xMed))
yMAD = np.median(np.absolute(y - yMed))
ylo = yMed - 10*yMAD
yhi = yMed + 10*yMAD
xlo, xhi = x.quantile([0., 0.99])
xBuffer = xMAD/4.
xlo -= xBuffer
xhi += xBuffer
return (xlo, xhi), (ylo, yhi)
class QAExplorer(hv.streams.Stream):
catalog = param.Path(default='forced_big.parq', search_paths=['.','data'])
query = param.String(default='')
id_list = param.String(default='')
x_data = param.ObjectSelector(default='base_PsfFlux',
objects=list(default_xFuncs.keys()))
x_data_custom = param.String(default='')
y_data = param.ObjectSelector(default='modelfit_CModel - base_PsfFlux',
objects=list(default_yFuncs.keys()))
y_data_custom = param.String(default='')
labeller = param.ObjectSelector(default='default',
objects = list(default_labellers.keys()))
object_type = param.ObjectSelector(default='all',
objects=['all', 'star', 'galaxy'])
nbins = param.Integer(default=20, bounds=(10,100))
# write_selected = param.Action(default=write_selected)
output = parambokeh.view.Plot()
def __init__(self, rootdir='.', *args, **kwargs):
super(QAExplorer, self).__init__(*args, **kwargs)
self.rootdir = rootdir
self._ds = None
self._selected = None
# Sets self.ds property
# self._set_data(self.catalog, self.query, self.id_list,
# self.x_data, self.x_data_custom,
# self.y_data, self.y_data_custom, self.labeller)
@property
def funcs(self):
return self._get_funcs(self.x_data, self.x_data_custom,
self.y_data, self.y_data_custom,
self.labeller)
def _get_funcs(self, x_data, x_data_custom, y_data, y_data_custom,
labeller):
if self.x_data_custom:
xFunc = getFunc(self.x_data_custom)
else:
xFunc = getFunc(self.x_data)
if self.y_data_custom:
yFunc = getFunc(self.y_data_custom)
else:
yFunc = getFunc(self.y_data)
labeller = getLabeller(self.labeller)
return CompositeFunctor({'x' : xFunc,
'y' : yFunc,
'label' : labeller,
'id' : Column('id'),
'ra' : RAColumn(),
'dec': DecColumn()})
def _set_data(self, catalog, query, id_list,
x_data, x_data_custom, y_data, y_data_custom,
labeller, **kwargs):
funcs = self._get_funcs(x_data, x_data_custom,
y_data, y_data_custom,
labeller)
df = funcs(catalog, query=query)
df.index = df['id']
if id_list:
ids = self.get_saved_ids(id_list)
df = df.loc[ids]
ok = np.isfinite(df.x) & np.isfinite(df.y)
xdim = hv.Dimension('x', label=funcs['x'].name)
ydim = hv.Dimension('y', label=funcs['y'].name)
self._ds = hv.Dataset(df[ok], kdims=[xdim, ydim, 'ra', 'dec', 'id'], vdims=['label'])
@property
def ds(self):
if self._ds is None:
self._set_data(self.catalog, self.query, self.id_list,
self.x_data, self.x_data_custom,
self.y_data, self.y_data_custom,
self.labeller)
return self._ds
@property
def selected(self):
return self._selected
@property
def id_path(self):
return os.path.join(self.rootdir, 'data', 'ids')
def save_selected(self, name):
filename = os.path.join(self.id_path, '{}.h5'.format(name))
logging.info('writing {} ids to {}'.format(len(self.selected), filename))
self.selected.to_hdf(filename, 'ids', mode='w')
@property
def saved_ids(self):
"""Returns list of names of selected IDs
"""
return [os.path.splitext(f)[0] for f in os.listdir(self.id_path)]
def get_saved_ids(self, id_list):
id_list = id_list.split(',')
files = [os.path.join(self.id_path, '{}.h5'.format(f.strip())) for f in id_list]
        return pd.concat([pd.read_hdf(f, 'ids') for f in files])
import math
import string
from typing import Optional, Sequence, Tuple
import hypothesis.strategies as st
import numpy as np
import pandas as pd
import pandas.testing as tm
import pyarrow as pa
import pytest
from hypothesis import example, given, settings
import fletcher as fr
from fletcher.testing import examples
try:
# Only available in pandas 1.2+
# When this class is defined, we can also use `.str` on fletcher columns.
from pandas.core.strings.object_array import ObjectStringArrayMixin # noqa F401
_str_accessors = ["str", "fr_str"]
except ImportError:
_str_accessors = ["fr_str"]
@pytest.fixture(params=_str_accessors, scope="module")
def str_accessor(request):
return request.param
@st.composite
def string_patterns_st(draw, max_len=50) -> Tuple[Sequence[Optional[str]], str, int]:
ab_charset_st = st.sampled_from("ab")
ascii_charset_st = st.sampled_from(string.ascii_letters)
charset_st = st.sampled_from((ab_charset_st, ascii_charset_st))
charset = draw(charset_st)
fixed_pattern_st = st.sampled_from(["a", "aab", "aabaa"])
generated_pattern_st = st.text(alphabet=charset, max_size=max_len)
pattern_st = st.one_of(fixed_pattern_st, generated_pattern_st)
pattern = draw(pattern_st)
min_str_size = 0 if len(pattern) > 0 else 1
raw_str_st = st.one_of(
st.none(), st.lists(charset, min_size=min_str_size, max_size=max_len)
)
raw_seq_st = st.lists(raw_str_st, max_size=max_len)
raw_seq = draw(raw_seq_st)
for s in raw_seq:
if s is None:
continue
"""
There seems to be a bug in pandas for this edge case
>>> pd.Series(['']).str.replace('', 'abc', n=1)
0
dtype: object
But
>>> pd.Series(['']).str.replace('', 'abc')
0 abc
dtype: object
I believe the second result is the correct one and this is what the
fletcher implementation returns.
"""
max_ind = len(s) - len(pattern)
if max_ind < 0:
continue
repl_ind_st = st.integers(min_value=0, max_value=max_ind)
repl_ind_list_st = st.lists(repl_ind_st, max_size=math.ceil(max_len / 10))
repl_ind_list = draw(repl_ind_list_st)
for j in repl_ind_list:
s[j : j + len(pattern)] = pattern
seq = ["".join(s) if s is not None else None for s in raw_seq]
offset = draw(st.integers(min_value=0, max_value=len(seq)))
return (seq, pattern, offset)
string_patterns = pytest.mark.parametrize(
"data, pat",
[
([], ""),
(["a", "b"], ""),
(["aa", "ab", "ba"], "a"),
(["aa", "ab", "ba", "bb", None], "a"),
(["aa", "ab", "ba", "bb", None], "A"),
(["aa", "ab", "bA", "bB", None], "a"),
(["aa", "AB", "ba", "BB", None], "A"),
],
)
strip_examples = examples(
example_list=[
[],
[""],
[None],
[" "],
["\u2000"],
[" a"],
["a "],
[" a "],
# https://github.com/xhochy/fletcher/issues/174
["\xa0"],
["\u2000a\u2000"],
["\u2000\u200C\u2000"],
["\n\u200C\r"],
["\u2000\x80\u2000"],
["\t\x80\x0b"],
["\u2000\u10FFFF\u2000"],
[" \u10FFFF "],
]
+ [
[c]
for c in " \t\r\n\x1f\x1e\x1d\x1c\x0c\x0b"
"\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2000\u2009\u200A\u200B\u2028\u2029\u202F\u205F"
]
+ [[chr(c)] for c in range(0x32)]
+ [[chr(c)] for c in range(0x80, 0x85)]
+ [[chr(c)] for c in range(0x200C, 0x2030)]
+ [[chr(c)] for c in range(0x2060, 0x2070)]
+ [[chr(c)] for c in range(0x10FFFE, 0x110000)],
example_kword="data",
)
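# A hedged sketch (not in the original file) of how `strip_examples` is combined with a
# hypothesis strategy; the test name is illustrative, `fletcher_variant` is assumed to be
# a conftest fixture, and it relies on the `_check_str_to_str` helper defined further below.
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
@strip_examples
def test_strip_sketch(data, str_accessor, fletcher_variant):
    _check_str_to_str("strip", data, str_accessor, fletcher_variant)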
def _fr_series_from_data(data, fletcher_variant, dtype=pa.string(), index=None):
arrow_data = pa.array(data, type=dtype)
if fletcher_variant == "chunked":
fr_array = fr.FletcherChunkedArray(arrow_data)
else:
fr_array = fr.FletcherContinuousArray(arrow_data)
return pd.Series(fr_array, index=index)
def _check_series_equal(result_fr, result_pd):
result_fr = result_fr.astype(result_pd.dtype)
tm.assert_series_equal(result_fr, result_pd)
def _check_str_to_t(
t, func, data, str_accessor, fletcher_variant, test_offset=0, *args, **kwargs
):
"""Check a .str. function that returns a series with type t."""
tail_len = len(data) - test_offset
error = None
try:
ser_pd = pd.Series(data, dtype=str).tail(tail_len)
result_pd = getattr(ser_pd.str, func)(*args, **kwargs)
except Exception as e:
error = e
ser_fr = _fr_series_from_data(data, fletcher_variant).tail(tail_len)
if error:
# If pandas raises an exception, fletcher should do so, too.
with pytest.raises(type(error)):
result_fr = getattr(getattr(ser_fr, str_accessor), func)(*args, **kwargs)
else:
result_fr = getattr(getattr(ser_fr, str_accessor), func)(*args, **kwargs)
_check_series_equal(result_fr, result_pd)
def _check_str_to_str(func, data, str_accessor, fletcher_variant, *args, **kwargs):
_check_str_to_t(str, func, data, str_accessor, fletcher_variant, *args, **kwargs)
def _check_str_to_bool(func, data, str_accessor, fletcher_variant, *args, **kwargs):
_check_str_to_t(bool, func, data, str_accessor, fletcher_variant, *args, **kwargs)
def _check_str_to_int(func, data, str_accessor, fletcher_variant, *args, **kwargs):
_check_str_to_t(int, func, data, str_accessor, fletcher_variant, *args, **kwargs)
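# A minimal illustration (hypothetical test name, not from the original file) of how the
# pieces above fit together: `string_patterns` parametrizes (data, pat) pairs and
# `_check_str_to_bool` compares the fletcher result against plain pandas. The
# `str_accessor` fixture is defined above; `fletcher_variant` is assumed to come from conftest.
@string_patterns
def test_endswith_sketch(data, pat, str_accessor, fletcher_variant):
    _check_str_to_bool("endswith", data, str_accessor, fletcher_variant, pat=pat)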
def test_fr_str_accessor(fletcher_array):
data = ["a", "b"]
ser_pd = pd.Series(data)
# object series is returned
s = ser_pd.fr_str.encode("utf8")
assert s.dtype == np.dtype("O")
# test fletcher functionality and fallback to pandas
arrow_data = pa.array(data, type=pa.string())
fr_array = fletcher_array(arrow_data)
ser_fr = pd.Series(fr_array)
# pandas strings only method
s = ser_fr.fr_str.encode("utf8")
assert isinstance(s.values, fr.FletcherBaseArray)
def test_fr_str_accessor_fail(fletcher_variant):
data = [1, 2]
ser_pd = pd.Series(data)
with pytest.raises(Exception):
ser_pd.fr_str.startswith("a")
@settings(deadline=None)
@given(char=st.characters(blacklist_categories=("Cs",)))
def test_utf8_size(char):
char_bytes = char.encode("utf-8")
expected = len(char_bytes)
computed = fr.algorithms.string.get_utf8_size(char_bytes[0])
assert computed == expected
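def test_utf8_size_examples():
    # Worked examples (an informal addition, not exhaustive): standard UTF-8 uses 1-4
    # bytes per code point and the width is recoverable from the lead byte alone, which
    # is the property exercised by the hypothesis test above.
    for char, nbytes in [("a", 1), ("é", 2), ("€", 3), ("😀", 4)]:
        encoded = char.encode("utf-8")
        assert len(encoded) == nbytes
        assert fr.algorithms.string.get_utf8_size(encoded[0]) == nbytes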
#####################################################
## String accessor methods (sorted alphabetically) ##
#####################################################
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_capitalize(data, str_accessor, fletcher_variant):
_check_str_to_str("capitalize", data, str_accessor, fletcher_variant)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_casefold(data, str_accessor, fletcher_variant):
_check_str_to_str("casefold", data, str_accessor, fletcher_variant)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_cat(data, str_accessor, fletcher_variant, fletcher_variant_2):
if any("\x00" in x for x in data if x):
# pytest.skip("pandas cannot handle \\x00 characters in tests")
# Skip is not working properly with hypothesis
return
ser_pd = pd.Series(data, dtype=str)
ser_fr = _fr_series_from_data(data, fletcher_variant)
ser_fr_other = _fr_series_from_data(data, fletcher_variant_2)
result_pd = ser_pd.str.cat(ser_pd)
result_fr = getattr(ser_fr, str_accessor).cat(ser_fr_other)
result_fr = result_fr.astype(object)
# Pandas returns np.nan for NA values in cat, keep this in line
result_fr[result_fr.isna()] = np.nan
|
tm.assert_series_equal(result_fr, result_pd)
|
pandas.testing.assert_series_equal
|
# index page
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input,Output,State
import users_mgt as um
from server import app, server
from flask_login import logout_user, current_user
from views import success, login, login_fd, logout
import data_material
import admin
import pandas as pd
import sqlalchemy
from config import engine
import base64
import dash_table
import datetime
#new_row = pd.Series(data={'Course_Name':'Python Data Analysis', 'Course_Rating':'', 'Course_Hours':'20','Students':'0'},
#name='{}'.format(len(df.index+1)))
#df = df.append(new_row, ignore_index=False)
#df=df.drop(columns="python_data_analysis")
#df=pd.read_sql_table('user',con='sqlite:///users.db')
#df=df.drop(3)
#df.insert(loc=4,column='Students',value='',allow_duplicates=False)
#df.to_sql("user", con='sqlite:///users.db', if_exists='replace', index=False)
#um.add_course('Python Data Analysis','',20,0)
#um.add_course('Machine Learning','',27,0)
#um.edit_sql_cell('python_data_analysis','student_id',1,1)
#df.insert(loc=0,column='student_id',value='1',allow_duplicates=False)
#df.set_index('student_id',inplace=True)
#df.to_sql("python_data_analysis", con='sqlite:///users.db', if_exists='replace', index=True,index_label='student_id')
#df.to_sql("courses", con='sqlite:///users.db', if_exists='replace', index=False)
#index=df.index[df['id'] == '2'].tolist()
#print( um.read_sql_cell('user','username', index[0] ))
#print(len(df.index))
#print(df)
#https://kanoki.org/2019/04/12/pandas-how-to-get-a-cell-value-and-update-it/
#https://www.youtube.com/watch?v=skGwKh1dAdk
encoded = base64.b64encode(open('logo.png', 'rb').read())
logo_img=dbc.Row([dbc.Col([
html.Img(src='data:image/png;base64,{}'.format(encoded.decode()), id='logo_img', height=80)
] ,
xs=dict(size=12,offset=0), sm=dict(size=12,offset=0),
md=dict(size=12,offset=0), lg=dict(size=12,offset=0), xl=dict(size=12,offset=0))
])
encoded2 = base64.b64encode(open('bg4.jpg', 'rb').read())
bg_img=html.Img(src='data:image/png;base64,{}'.format(encoded2.decode()), id='bg_img', height='800rem',width='100%')
header_text=html.Div('Learning Made Easy',style=dict(color='black',
fontWeight='bold',fontSize='1.4rem',marginTop='1rem',marginLeft='3rem'))
please_login_text=html.Div('Please login to continue..',style=dict(color='black',
fontWeight='bold',fontSize='1.4rem',marginTop='1rem',marginLeft='3rem'))
logout_msg=html.Div(id='logout')
search_input=dbc.Input(id="input", placeholder="Search here..", type="text",bs_size="lg",
style=dict(marginTop='1rem',fontSize='1.1rem'))
search_button= dbc.Button("Search", color="primary", size='lg', n_clicks=0,
style=dict(marginTop='1rem',fontSize='1.1rem'))
logout_button= dbc.Button("Logout", color="primary", size='md', n_clicks=0,id='logout_btn',
style=dict(marginTop='0.3rem',fontSize='1.1rem',marginLeft='2.5rem'))
db_logo_img=dbc.Col([ logo_img] ,
xs=dict(size=2,offset=0), sm=dict(size=2,offset=0),
md=dict(size=2,offset=0), lg=dict(size=2,offset=0), xl=dict(size=1,offset=0))
db_header_text= dbc.Col([ header_text] ,
xs=dict(size=8,offset=0), sm=dict(size=8,offset=0),
md=dict(size=2,offset=0), lg=dict(size=3,offset=0), xl=dict(size=3,offset=0))
db_search_input=dbc.Col([search_input],
xs=dict(size=5, offset=2), sm=dict(size=5, offset=2),
md=dict(size=2, offset=2), lg=dict(size=2, offset=2), xl=dict(size=2, offset=1))
db_search_button=dbc.Col([search_button],
xs=dict(size=2, offset=0), sm=dict(size=2, offset=0),
md=dict(size=2, offset=0), lg=dict(size=2, offset=0), xl=dict(size=2, offset=0))
db_please_login_text= dbc.Col([ please_login_text] ,
xs=dict(size=8,offset=0), sm=dict(size=8,offset=0),
md=dict(size=2,offset=0), lg=dict(size=3,offset=0), xl=dict(size=3,offset=0))
data_progress=dbc.Progress(children=[], max=100, striped=True, color="primary",id='progress',
style=dict(height='20px',backgroundColor='white',fontWeight='bold'),
bar_style=dict(color='black'))
data_course_card=dbc.Col([html.Br(),dbc.CardImg(src="https://res.cloudinary.com/dyd911kmh/image/upload/f_auto,q_auto:best/v1567221927/image_3_ayi4rs.png", top=True),
dbc.CardBody(
[
html.H5("Python Data Analysis", className="card-title",style=dict(color='black')),
html.P(
"using pandas python package to analyze data and make reports",
style=dict(color='black')
),
dbc.Nav([ dbc.NavItem(dbc.NavLink("Details", active=True, href="/data", id='data_details')) ],pills=True)
] ,style=dict(backgroundColor='#f0ad4e')
)
] ,xl=dict(size=2,offset=1),lg=dict(size=2,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
data_course_card_progress=dbc.Col([html.Br(),dbc.CardImg(src="https://res.cloudinary.com/dyd911kmh/image/upload/f_auto,q_auto:best/v1567221927/image_3_ayi4rs.png", top=True,
style=dict(height='20vh')),
dbc.CardBody(
[
html.H5("Python Data Analysis", className="card-title",style=dict(color='black')),
html.P(
"using pandas python package to analyze data and make reports",
style=dict(color='black')
),
dbc.Nav([ dbc.NavItem(dbc.NavLink("Details", active=True, href="/data", id='data_details')) ],pills=True),html.Br(),data_progress
] ,style=dict(backgroundColor='#f0ad4e')
)
] ,xl=dict(size=2,offset=1),lg=dict(size=2,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
ml_course_card=dbc.Col([html.Br(),dbc.CardImg(src="https://iraqcoders.com/wp-content/uploads/2019/02/emerging-tech_ai_machine-learning-100748222-large.jpg", top=True,
style=dict(height='20vh')),
dbc.CardBody(
[
html.H5("Machine Learning", className="card-title",style=dict(color='black')),
html.P(
"you will understand how to implement basic machine learning ",
style=dict(color='black')
),
dbc.Button("Details", color="primary"),
] ,style=dict(backgroundColor='#f0ad4e')
)
] ,xl=dict(size=2,offset=1),lg=dict(size=2,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
sql_course_card=dbc.Col([html.Br(),dbc.CardImg(src="https://media.onlinecoursebay.com/2019/08/27030502/2488822_25d1-750x405.jpg", top=True,
style=dict(height='20vh')),
dbc.CardBody(
[
html.H5("SQL basics", className="card-title",style=dict(color='black')),
html.P(
"you will understand how to deal with different types of databases",
style=dict(color='black')
),
dbc.Button("Details", color="primary"),
] ,style=dict(backgroundColor='#f0ad4e')
)
] ,xl=dict(size=2,offset=1),lg=dict(size=2,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
image_course_card=dbc.Col([html.Br(),dbc.CardImg(src="https://images-na.ssl-images-amazon.com/images/I/61gBVmFtNpL.jpg", top=True,
style=dict(height='20vh')),
dbc.CardBody(
[
html.H5("Image Processing", className="card-title",style=dict(color='black')),
html.P(
"you will understand how to use opencv for image processing",
style=dict(color='black')
),
dbc.Button("Details", color="primary"),
] ,style=dict(backgroundColor='#f0ad4e')
)
] ,xl=dict(size=2,offset=1),lg=dict(size=2,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
iot_course_card=dbc.Col([html.Br(),dbc.CardImg(src="https://cdn.mindmajix.com/courses/iot-training.png", top=True,
style=dict(height='20vh')),
dbc.CardBody(
[
html.H5("Internet Of Things", className="card-title",style=dict(color='black')),
html.P(
"you will understand how IoT devices and systems works",
style=dict(color='black')
),
dbc.Button("Details", color="primary"),
] ,style=dict(backgroundColor='#f0ad4e')
)
] ,xl=dict(size=2,offset=1),lg=dict(size=2,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
embedded_course_card=dbc.Col([html.Br(),dbc.CardImg(src="https://prod-discovery.edx-cdn.org/media/course/image/785cf551-7f66-4350-b736-64a93427b4db-3dcdedbdf99d.small.jpg", top=True,
style=dict(height='20vh')),
dbc.CardBody(
[
html.H5("Embedded Systems", className="card-title",style=dict(color='black')),
html.P(
"you will learn embedded software techniques using tivac board",
style=dict(color='black')
),
dbc.Button("Details", color="primary"),
] ,style=dict(backgroundColor='#f0ad4e')
)
] ,xl=dict(size=2,offset=1),lg=dict(size=2,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
arch_course_card=dbc.Col([html.Br(),dbc.CardImg(src="https://moodle.aaup.edu/pluginfile.php/288902/course/overviewfiles/Computer-Architecture.jpg", top=True,
style=dict(height='20vh')),
dbc.CardBody(
[
html.H5("Computer Architecture", className="card-title",style=dict(color='black')),
html.P(
"you will learn how memory and cpu works in details",
style=dict(color='black')
),
dbc.Button("Details", color="primary"),
] ,style=dict(backgroundColor='#f0ad4e')
)
] ,xl=dict(size=2,offset=1),lg=dict(size=2,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
web_course_card=dbc.Col([html.Br(),dbc.CardImg(src="https://www.onlinecoursereport.com/wp-content/uploads/2020/07/shutterstock_394793860-1024x784.jpg", top=True,
style=dict(height='20vh')),
dbc.CardBody(
[
html.H5("Web development", className="card-title",style=dict(color='black')),
html.P(
"you will learn to develop website using html,css and javascript",
style=dict(color='black')
),
dbc.Button("Details", color="primary"),
] ,style=dict(backgroundColor='#f0ad4e')
)
] ,xl=dict(size=2,offset=1),lg=dict(size=2,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
courses_layout=dbc.Row([ data_course_card, ml_course_card,sql_course_card,image_course_card,
iot_course_card,embedded_course_card ,arch_course_card,web_course_card
] , no_gutters=False)
rate_button= dbc.Button("Rate Course", color="primary", size='lg', n_clicks=0,id='rate_button',
style=dict(marginTop='1rem',fontSize='1.1rem'))
rate_input= html.Div([dbc.Input(id="rate_input", placeholder="0-5 Stars", type="number",bs_size="lg",
min=1, max=5,
style=dict(fontSize='1.1rem')) ] )
submit_rating_button= dbc.Button("Submit", color="primary", size='lg', n_clicks=0, id='submit_rating_button',
style=dict(marginTop='1rem',fontSize='1.1rem'))
rating_input=dbc.Collapse([ rate_input, submit_rating_button
],id="collapse",is_open=False,
style=dict(border='0.5vh solid black')
)
rate_div=html.Div([rate_button,html.Br(),html.Br(),rating_input
] )
sidebar = html.Div(
[
html.H2("Course Content", className="display-4", style=dict(color='black',fontWeight='bold')),
html.Hr(style=dict(color='black')),
html.P(
"Welcome to the course , you can start your lessons bellow ..",className="lead", style=dict(color='black')
),
dbc.Nav(
[
dbc.NavLink("Session1", href="/data/video1", active="exact",style=dict(fontWeight='bold')),
dbc.NavLink("Session2", href="/data/video2", active="exact",style=dict(fontWeight='bold')),
dbc.NavLink("Session3", href="/data/video3", active="exact",style=dict(fontWeight='bold')),
dbc.NavLink("Session4", href="/data/video4", active="exact",style=dict(fontWeight='bold')),
dbc.NavLink("Session5", href="/data/video5", active="exact",style=dict(fontWeight='bold')),
dbc.NavLink("Session6", href="/data/video6", active="exact",style=dict(fontWeight='bold'))
],
vertical=True,
pills=True,
),rate_div
],
style=dict(backgroundColor='#f0ad4e',height='100%')
)
star_img = 'star.jpg'
encoded = base64.b64encode(open(star_img, 'rb').read())
star_image = html.Img(src='data:image/png;base64,{}'.format(encoded.decode()), id='img1', height=40, width=40)
star_image_div=html.Div(star_image, style=dict(display='inline-block'))
data_course_header=html.Div([html.Br(),html.H1('Python Data analysis and visualization course',style=dict(fontSize=36)),
html.Div(' Rating : 4.5/5',style=dict(fontSize=22,display='inline-block'),id='stars'),
star_image_div,html.Div ('Students : 23',style=dict(fontSize=22),id='students'),
html.Div('Total Hours : 20 hour',style=dict(fontSize=22)),html.Br(),
dbc.Nav([dbc.NavItem(dbc.NavLink("Enroll Course", active=True, href="/data/video1", id='enroll_data'))],
pills=True),html.Br()
] , style=dict(color='white',border='4px #f0ad4e solid'))
data_course_Req= html.Div([html.H1('Requirements',style=dict(fontSize=32)),
html.Div(style=dict(border='3px #f0ad4e solid',width='100%',height='5px')),html.Br(),
html.Div('1- Basic math skills.',style=dict(fontSize=22)),
html.Div('2- Basic to intermediate Python skills.',style=dict(fontSize=22)),
html.Div('3- Have a computer (either Mac, Windows, or Linux).',style=dict(fontSize=22))
] , style=dict(color='white'))
data_course_desc=html.Div([html.H1('Description',style=dict(fontSize=32)),
html.Div(style=dict(border='3px #f0ad4e solid',width='100%',height='5px')),
html.Div('Our goal is to provide you with complete preparation. And this course will turn you into a job-ready data analyst.'
' To take you there, we will cover the following fundamental topics extensively.',
style=dict(fontSize=22,color='white')),
html.Div('1- Theory about the field of data analytics',style=dict(fontSize=22,color='white')),
html.Div('2- Basic and Advanced python',style=dict(fontSize=22,color='white')),
html.Div('3- Pandas and Numpy libraries',style=dict(fontSize=22,color='white')),
html.Div('4- Data collection ,Cleaning and Visualization',style=dict(fontSize=22,color='white'))
] )
data_video1_youtube=html.Div(children=[
html.Iframe(width="100%%", height="474", src="https://www.youtube.com/embed/nLw1RNvfElg"
, title="YouTube video player"
),
])
data_quiz1_header=html.Div('A graph used in statistics to show how many values of a variable occur within a specific range',
style=dict(fontSize=22,color='black',fontWeight='bold') )
data_quiz1_choices=dcc.RadioItems(
options=[
{'label': 'Bar plot', 'value': 'bar'},
{'label': 'Histogram ', 'value': 'hist'},
{'label': 'Scatter plot', 'value': 'scat'},
{'label': 'Box plot', 'value': 'box'}
],
value='',labelStyle=dict(display='block',color='black',marginLeft='1rem',fontSize=22),
inputStyle=dict(width='1.2rem',height='1.2rem',marginRight='0.5rem') ,id='data_quiz1_choices',
style=dict(marginLeft='4rem') , persistence=True
)
data_quiz1_submit=dbc.Button("Submit", color="primary", size='lg', n_clicks=0,id='data_quiz1_submit',
style=dict(marginTop='0.3rem',fontSize='1.1rem'))
data_quiz1_answer=html.Div('',style=dict(fontSize=22,color='white',fontWeight='bold'),id='data_quiz1_answer')
data_quiz1=html.Div([ html.H1('Quiz1',style=dict(fontSize=32,color='black')),data_quiz1_header,html.Br(),
html.Hr(style=dict(color='black')) ,data_quiz1_choices
] ,style=dict(backgroundColor='#f0ad4e') )
data_video1_layout=dbc.Row([dbc.Col([html.Br(),sidebar
] ,xl=dict(size=2,offset=0),lg=dict(size=2,offset=0),
md=dict(size=5,offset=0),sm=dict(size=10,offset=1),xs=dict(size=10,offset=1)
) ,
dbc.Col([ html.Br(),data_video1_youtube,html.Br(),html.Br(),
data_quiz1,data_quiz1_submit,html.Br(),data_quiz1_answer
] ,xl=dict(size=5,offset=2),lg=dict(size=5,offset=2),
md=dict(size=3,offset=1),sm=dict(size=10,offset=1),xs=dict(size=10,offset=1) )
] , no_gutters=False )
data_details_layout=dbc.Row([dbc.Col([html.Br(),data_course_header
] ,xl=dict(size=6,offset=1),lg=dict(size=6,offset=1),
md=dict(size=8,offset=1),sm=dict(size=10,offset=1),xs=dict(size=10,offset=1)
) ,
dbc.Col([ html.Br(),data_course_Req,html.Br(),data_course_desc
] ,xl=dict(size=5,offset=1),lg=dict(size=5,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
] , no_gutters=False )
app.layout = html.Div(
[
dbc.Row([ db_logo_img ,db_header_text
] ,no_gutters=False,style=dict(backgroundColor='#f0ad4e'),id='header' )
,
html.Div(id='page-content')
, html.Div( [] , id='page-content2'),
dcc.Location(id='url', refresh=True) , html.Div([''],id='hidden_div1',style=dict(display='none')),
html.Div([''],id='hidden_div2',style=dict(display='none')) ,
dcc.Interval(id='my_interval', interval=1500)
]
)
# <iframe width="843" height="474" src="https://www.youtube.com/embed/nLw1RNvfElg"
# title="YouTube video player" frameborder="0"
# allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
print( df['Course_Rating'][2] )
a=3.5666667
print("%.2f" % round(a,2))
@app.callback([Output('stars','children'),Output('students','children')],
Input('url', 'pathname'))
def update_data_details(path):
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
rating_sum=0
rating_students=0
for rating in range(0,len(df.index) ):
if um.read_sql_cell('python_data_analysis','Course_Rating',rating) != '':
rating_students+=1
rating_sum= int(rating_sum) + int(df['Course_Rating'][rating])
stars_avg = int(rating_sum)/rating_students if rating_students else 0  # avoid ZeroDivisionError when no ratings exist yet
students_num=len(df.index)
return (' Rating : {}/5'.format("%.2f" % round(stars_avg,1)),'Students : {}'.format(students_num))
@app.callback([Output('page-content', 'children'),Output('header','children'),Output('page-content2','children')],
[Input('url', 'pathname')])
def display_page(pathname):
if pathname == '/':
return (login.layout, [db_logo_img , db_header_text,db_please_login_text],[])
elif pathname == '/login':
return (login.layout,[ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/success':
if current_user.is_authenticated:
if current_user.username=='admin':
return ([admin.layout,html.Br(),logout_button,dcc.Location(id='url_login_success', refresh=True)],[],[])
else:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text,logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [ db_logo_img , db_header_text ,
db_search_input,db_search_button,db_username_text] ,[bg_img])
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/data_course_table':
if current_user.is_authenticated:
return (admin.layout2, [], [])
else:
return (login_fd.layout, [db_logo_img, db_header_text, db_please_login_text], [])
elif pathname == '/Add_Course':
if current_user.is_authenticated:
return (admin.layout3, [], [])
else:
return (login_fd.layout, [db_logo_img, db_header_text, db_please_login_text], [])
elif pathname == '/logout':
if current_user.is_authenticated:
logout_user()
return (logout.layout, [ db_logo_img , db_header_text ,db_please_login_text],[])
else:
return (logout.layout, [db_logo_img, db_header_text, db_please_login_text], [])
#"https://www.youtube.com/embed/ln8dyS2y4Nc"
elif pathname == "/Courses":
if current_user.is_authenticated:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [ db_logo_img , db_header_text ,
db_search_input,db_search_button,db_username_text]
, [courses_layout] )
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/data':
if current_user.is_authenticated:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [db_logo_img, db_header_text,
db_search_input, db_search_button,db_username_text]
,data_details_layout)
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/data/video1':
if current_user.is_authenticated:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [db_logo_img, db_header_text,
db_search_input, db_search_button,db_username_text]
,data_video1_layout)
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/data/video2':
if current_user.is_authenticated:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [db_logo_img, db_header_text,
db_search_input, db_search_button,db_username_text]
,data_material.data_video2_layout)
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/data/video3':
if current_user.is_authenticated:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [db_logo_img, db_header_text,
db_search_input, db_search_button,db_username_text]
,data_material.data_video3_layout)
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/data/video4':
if current_user.is_authenticated:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [db_logo_img, db_header_text,
db_search_input, db_search_button,db_username_text]
,data_material.data_video4_layout)
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/data/video5':
if current_user.is_authenticated:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [db_logo_img, db_header_text,
db_search_input, db_search_button,db_username_text]
,data_material.data_video5_layout)
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/data/video6':
if current_user.is_authenticated:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [db_logo_img, db_header_text,
db_search_input, db_search_button,db_username_text]
,data_material.data_video6_layout)
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == "/My-Courses":
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
try:
um.read_sql_cell('python_data_analysis', 'Enrolled', index[0])
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [ db_logo_img , db_header_text ,
db_search_input,db_search_button,db_username_text]
,[data_course_card_progress] )
except:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [ db_logo_img , db_header_text ,
db_search_input,db_search_button,db_username_text]
,[html.H1("you don't have any courses yet",
style={'textAlign':'center'})
] )
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
# If the user tries to reach a different page, return a 404 message
return ( html.H1("404: Not found", className="text-danger"), [],[] )
@app.callback(
Output('user-name', 'children'),
[Input('page-content', 'children')])
def cur_user(input1):
if current_user.is_authenticated:
return html.Div('Current user: ' + current_user.username)
# 'User authenticated' return username in get_id()
else:
return ''
@app.callback(
Output('logout', 'children'),
[Input('page-content', 'children')])
def user_logout(input1):
if current_user.is_authenticated:
return html.A('Logout', href='/logout')
else:
return ''
# first input is the submit-button clicks; second input (State) is the quiz answer picked by the student
# first output is the message that appears after the user submits an answer; second output is the style (color) of that message
@app.callback([Output('data_quiz1_answer', 'children') , Output('data_quiz1_answer', 'style') ],
Input('data_quiz1_submit', 'n_clicks'),State('data_quiz1_choices', 'value') )
def data_quiz1_answer(clicks,answer):
if answer=='hist': # check if answer is the correct answer
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db') #reading course table in database
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist() # reading the id of the current user
ans=um.read_sql_cell('python_data_analysis','quiz1_state',index[0]) # reading the quiz1 answer that is recorded in database
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0]) # reading the course progress for the current user
new_progress = '{}{}'.format(int(progress.split('%')[0]) + 10, '%') # increase the course progress
if ans=='': # an empty state means this is the user's first attempt at the quiz
um.edit_sql_cell('python_data_analysis','quiz1_state',index[0],'passed') # update the quiz1 state to passed
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress) # update the course progress in database
return ('Correct Answer , Nice work..',dict(fontSize=22,color='green',fontWeight='bold')) # change the output string
elif ans=='passed':
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold')) # user already answered so no update in database only return string
elif ans == 'failed':
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
um.edit_sql_cell('python_data_analysis', 'quiz1_state', index[0], 'passed')
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif answer=='':
return ('',dict(fontSize=22,color='green',fontWeight='bold'))
else:
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz1_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) -10, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis', 'quiz1_state', index[0], 'failed')
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans=='passed':
um.edit_sql_cell('python_data_analysis','quiz1_state',index[0],'failed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans == 'failed':
return ('Wrong Answer , Try Again..', dict(fontSize=22, color='red', fontWeight='bold'))
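# Sketch (not wired into the app): the quiz callbacks above and below all repeat the same
# pass/fail bookkeeping. A single helper such as this hypothetical one could be called from
# each callback; `quiz_col`, `correct_value` and `delta` are illustrative parameter names,
# and the um.* database helpers are assumed to behave exactly as they are used above.
def _grade_quiz(quiz_col, correct_value, answer, delta=10):
    green = dict(fontSize=22, color='green', fontWeight='bold')
    red = dict(fontSize=22, color='red', fontWeight='bold')
    if answer == '':
        return ('', green)
    if not current_user.is_authenticated:
        return ('', red)
    df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
    index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
    state = um.read_sql_cell('python_data_analysis', quiz_col, index[0])
    progress = um.read_sql_cell('python_data_analysis', 'Course_progress', index[0])
    if answer == correct_value:
        if state != 'passed':
            # first pass (or previously failed): record the pass and add progress
            new_progress = '{}%'.format(int(progress.split('%')[0]) + delta)
            um.edit_sql_cell('python_data_analysis', quiz_col, index[0], 'passed')
            um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
        return ('Correct Answer , Nice work..', green)
    # wrong answer: progress is only subtracted if the quiz was previously passed
    if state == 'passed':
        new_progress = '{}%'.format(int(progress.split('%')[0]) - delta)
        um.edit_sql_cell('python_data_analysis', quiz_col, index[0], 'failed')
        um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
    elif state == '':
        um.edit_sql_cell('python_data_analysis', quiz_col, index[0], 'failed')
    return ('Wrong Answer , Try Again..', red)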
@app.callback([Output('data_quiz2_answer', 'children') , Output('data_quiz2_answer', 'style') ],
Input('data_quiz2_submit', 'n_clicks'),State('data_quiz2_choices', 'value') )
def data_quiz2_answer(clicks,answer):
if answer=='pd.Dataframe':
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz2_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) + 18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis','quiz2_state',index[0],'passed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Correct Answer , Nice work..',dict(fontSize=22,color='green',fontWeight='bold'))
elif ans=='passed':
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif ans == 'failed':
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
um.edit_sql_cell('python_data_analysis', 'quiz2_state', index[0], 'passed')
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif answer=='':
return ('',dict(fontSize=22,color='green',fontWeight='bold'))
else:
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz2_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) -18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis', 'quiz2_state', index[0], 'failed')
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans=='passed':
um.edit_sql_cell('python_data_analysis','quiz2_state',index[0],'failed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans == 'failed':
return ('Wrong Answer , Try Again..', dict(fontSize=22, color='red', fontWeight='bold'))
@app.callback([Output('data_quiz3_answer', 'children') , Output('data_quiz3_answer', 'style') ],
Input('data_quiz3_submit', 'n_clicks'),State('data_quiz3_choices', 'value') )
def data_quiz3_answer(clicks,answer):
if answer=='plotly':
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz3_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) + 18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis','quiz3_state',index[0],'passed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Correct Answer , Nice work..',dict(fontSize=22,color='green',fontWeight='bold'))
elif ans=='passed':
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif ans == 'failed':
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
um.edit_sql_cell('python_data_analysis', 'quiz3_state', index[0], 'passed')
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif answer=='':
return ('',dict(fontSize=22,color='green',fontWeight='bold'))
else:
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz3_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) -18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis', 'quiz3_state', index[0], 'failed')
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans=='passed':
um.edit_sql_cell('python_data_analysis','quiz3_state',index[0],'failed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans == 'failed':
return ('Wrong Answer , Try Again..', dict(fontSize=22, color='red', fontWeight='bold'))
@app.callback([Output('data_quiz4_answer', 'children') , Output('data_quiz4_answer', 'style') ],
Input('data_quiz4_submit', 'n_clicks'),State('data_quiz4_choices', 'value') )
def data_quiz4_answer(clicks,answer):
if answer=='line chart':
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz4_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) + 18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis','quiz4_state',index[0],'passed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Correct Answer , Nice work..',dict(fontSize=22,color='green',fontWeight='bold'))
elif ans=='passed':
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif ans == 'failed':
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
um.edit_sql_cell('python_data_analysis', 'quiz4_state', index[0], 'passed')
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif answer=='':
return ('',dict(fontSize=22,color='green',fontWeight='bold'))
else:
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz4_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) -18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis', 'quiz4_state', index[0], 'failed')
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans=='passed':
um.edit_sql_cell('python_data_analysis','quiz4_state',index[0],'failed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans == 'failed':
return ('Wrong Answer , Try Again..', dict(fontSize=22, color='red', fontWeight='bold'))
@app.callback([Output('data_quiz5_answer', 'children') , Output('data_quiz5_answer', 'style') ],
Input('data_quiz5_submit', 'n_clicks'),State('data_quiz5_choices', 'value') )
def data_quiz5_answer(clicks,answer):
if answer=='bootstrap':
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz5_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) + 18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis','quiz5_state',index[0],'passed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Correct Answer , Nice work..',dict(fontSize=22,color='green',fontWeight='bold'))
elif ans=='passed':
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif ans == 'failed':
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
um.edit_sql_cell('python_data_analysis', 'quiz5_state', index[0], 'passed')
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif answer=='':
return ('',dict(fontSize=22,color='green',fontWeight='bold'))
else:
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz5_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) -18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis', 'quiz5_state', index[0], 'failed')
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans=='passed':
um.edit_sql_cell('python_data_analysis','quiz5_state',index[0],'failed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans == 'failed':
return ('Wrong Answer , Try Again..', dict(fontSize=22, color='red', fontWeight='bold'))
@app.callback([Output('data_quiz6_answer', 'children') , Output('data_quiz6_answer', 'style') ],
Input('data_quiz6_submit', 'n_clicks'),State('data_quiz6_choices', 'value') )
def data_quiz6_answer(clicks,answer):
if answer=='callbacks':
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz6_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) + 18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis','quiz6_state',index[0],'passed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Correct Answer , Nice work..',dict(fontSize=22,color='green',fontWeight='bold'))
elif ans=='passed':
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif ans == 'failed':
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
um.edit_sql_cell('python_data_analysis', 'quiz6_state', index[0], 'passed')
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif answer=='':
return ('',dict(fontSize=22,color='green',fontWeight='bold'))
else:
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
ans=um.read_sql_cell('python_data_analysis','quiz6_state',index[0])
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0])
new_progress = '{}{}'.format(int(progress.split('%')[0]) -18, '%')
if ans=='':
um.edit_sql_cell('python_data_analysis', 'quiz6_state', index[0], 'failed')
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans=='passed':
um.edit_sql_cell('python_data_analysis','quiz6_state',index[0],'failed')
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
return ('Wrong Answer , Try Again..',dict(fontSize=22,color='red',fontWeight='bold'))
elif ans == 'failed':
return ('Wrong Answer , Try Again..', dict(fontSize=22, color='red', fontWeight='bold'))
@app.callback(
Output("collapse", "is_open"),
[Input("rate_button", "n_clicks"),Input('submit_rating_button',"n_clicks")],
[State("collapse", "is_open"),State("rate_input", "value")],
)
def toggle_collapse(n1,n2, is_open,input_value):
if is_open==False:
if n1:
return True
else:
return False
elif is_open==True:
if n2 and input_value is not None and 1 <= input_value <= 5:
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
um.edit_sql_cell('python_data_analysis', 'Course_Rating', index[0], input_value)
return False
return True
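# Note: toggle_collapse above works in two phases: the first "Rate Course" click opens the
# collapse, and a later "Submit" click with a rating between 1 and 5 writes the value into
# Course_Rating for the current student before closing the collapse again.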
@app.callback(Output('hidden_div1','children'),
Input('my_interval' , 'n_intervals')
)
def data_enrolled(time):
if current_user.is_authenticated:
df=pd.read_sql_table('python_data_analysis',con='sqlite:///users.db')
index=df.index[df['student_id']=='{}'.format(current_user.id)].tolist()
try:
um.read_sql_cell('python_data_analysis', 'Enrolled', index[0])
return 'enrolled'
except:
return 'not_enrolled'
@app.callback(Output('enroll_data','children'),
Input('hidden_div1','children'))
def data_enrolled2(enroll_state):
if enroll_state== 'enrolled' :
return 'Continue Course'
elif enroll_state== 'not_enrolled':
return 'Enroll Course'
return 'Enroll'
@app.callback(Output('enroll_data','active'),
[Input('enroll_data','n_clicks'), Input('hidden_div1','children') ]
)
def data_enrolled3(enroll_data_btn,enroll_state) :
if enroll_data_btn and enroll_state== 'enrolled':
return True
elif enroll_data_btn and enroll_state== 'not_enrolled':
if current_user.is_authenticated:
um.add_data_student(current_user.id,'yes','0%','','','','','','','','')
return True
return True
@app.callback(
Output('our-table', 'data'),
[Input('Add_Student', 'n_clicks')],
[State('our-table', 'data'),
State('our-table', 'columns')],
prevent_initial_call=True)
def add_student(n_clicks, rows, columns):
if n_clicks > 0:
rows.append({c['id']: '' for c in columns})
return rows
@app.callback(
Output('our-table2', 'data'),
[Input('Add_Student_To_Course', 'n_clicks')],
[State('our-table2', 'data'),
State('our-table2', 'columns')],
prevent_initial_call=True)
def add_student_to_course(n_clicks, rows, columns):
if n_clicks > 0:
rows.append({c['id']: '' for c in columns})
return rows
@app.callback(
[Output('confirm_msg', 'children')],
[Input('Save_To_Database', 'n_clicks')],
[State('our-table', 'data'),State('our-table', 'columns')],
prevent_initial_call=True)
def save_to_database1(clicks,data,columns):
if clicks>0:
df = pd.DataFrame(data,columns=['id','username','email','password'])
df.to_sql("user", con='sqlite:///users.db', if_exists='replace', index=False)
return ['Data saved successfully to Database']
else:
return ['']
@app.callback(
[Output('confirm_msg2', 'children')],
[Input('Save_To_Database2', 'n_clicks')],
[State('our-table2', 'data'),State('our-table2', 'columns')],
prevent_initial_call=True)
def save_to_database2(clicks,data,columns):
if clicks>0:
df = pd.DataFrame(data=data,columns=['student_id','Enrolled','Course_progress','Course_Rating'
,'quiz1_state','quiz2_state','quiz3_state','quiz4_state','quiz5_state','quiz6_state','final_exam_degree'])
print(df)
df.to_sql("python_data_analysis", con='sqlite:///users.db', if_exists='replace', index=False)
return ['Data saved successfully to Database']
else:
return ['']
@app.callback([Output('postgres_datatable', 'children')],
[Input('Refresh1', 'n_clicks')])
def refresh_table1(clicks):
if clicks>=0:
df = pd.read_sql_table('user', con='sqlite:///users.db')
return [dash_table.DataTable(
columns = [
{
'name': str(x),
'id': str(x),
'deletable': False,
} for x in df.columns
],id='our-table',
data=df.to_dict('records'), page_size=50
,style_cell=dict(textAlign= 'center', border= '2px solid black'
,backgroundColor= '#f0ad4e',color='black',fontSize='2vh',fontWeight='bold'),
style_header=dict(backgroundColor= '#0275d8',
fontWeight= 'bold', border= '1px solid black',fontSize='1.5vh'),
editable=True,
row_deletable=True,
filter_action="native",
sort_action="native", # give user capability to sort columns
sort_mode="single", # sort across 'multi' or 'single' columns
page_action='none', # render all of the data at once. No paging.
style_table={'height': '100%', 'overflowY': 'auto'}
)
]
else:
pass
@app.callback([Output('postgres_datatable2', 'children')],
[Input('Refresh2', 'n_clicks')])
def refresh_table2(clicks):
if clicks>=0:
df2 = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
return [dash_table.DataTable(
columns = [
{
'name': str(x),
'id': str(x),
'deletable': False,
} for x in df2.columns
],id='our-table2',
data=df2.to_dict('records'), page_size=50
,style_cell=dict(textAlign= 'center', border= '2px solid black'
,backgroundColor= '#f0ad4e',color='black',fontSize='2vh',fontWeight='bold'),
style_header=dict(backgroundColor= '#0275d8',
fontWeight= 'bold', border= '1px solid black',fontSize='1.5vh'),
editable=True,
row_deletable=True,
filter_action="native",
sort_action="native", # give user capability to sort columns
sort_mode="single", # sort across 'multi' or 'single' columns
page_action='none', # render all of the data at once. No paging.
style_table={'height': '100%', 'overflowY': 'auto'}
)
]
else:
pass
@app.callback([Output('progress', 'children'),Output('progress', 'value')],
[Input('url', 'pathname')])
def update_data_progress(path):
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
try :
um.read_sql_cell('python_data_analysis', 'Enrolled', index[0])
progress = um.read_sql_cell('python_data_analysis', 'Course_progress', index[0])
if progress=='0%':
return ('0%',5)
else:
return ('{}'.format(progress),int(progress.split('%')[0]) )
except:
return('0%',6)
@app.callback(Output('data_quiz1_choices','value'),
Input('url','pathname'),State('data_quiz1_choices','value') )
def update_quiz1_state(path,value):
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
state = um.read_sql_cell('python_data_analysis', 'quiz1_state', index[0])
if state=='passed':
return 'hist'
else:
return value
@app.callback(Output('data_quiz2_choices','value'),
Input('url','pathname'),State('data_quiz2_choices','value' ) )
def update_quiz2_state(path,value):
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
state = um.read_sql_cell('python_data_analysis', 'quiz2_state', index[0])
if state=='passed':
return 'pd.Dataframe'
else:
return value
@app.callback(Output('data_quiz3_choices','value'),
Input('url','pathname') ,State('data_quiz3_choices','value') )
def update_quiz3_state(path,value):
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
state = um.read_sql_cell('python_data_analysis', 'quiz3_state', index[0])
if state=='passed':
return 'plotly'
else:
return value
@app.callback(Output('data_quiz4_choices','value'),
Input('url','pathname'),State('data_quiz4_choices','value' ) )
def update_quiz4_state(path,value):
if current_user.is_authenticated:
df =
|
pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
|
pandas.read_sql_table
|
"""
Base and utility classes for tseries type pandas objects.
"""
from __future__ import annotations
from datetime import datetime
from typing import (
TYPE_CHECKING,
Any,
Callable,
Sequence,
TypeVar,
cast,
final,
)
import warnings
import numpy as np
from pandas._libs import (
NaT,
Timedelta,
lib,
)
from pandas._libs.tslibs import (
BaseOffset,
Resolution,
Tick,
parsing,
to_offset,
)
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
cache_readonly,
doc,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_dtype_equal,
is_integer,
is_list_like,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
_index_shared_docs,
)
from pandas.core.indexes.extension import (
NDArrayBackedExtensionIndex,
inherit_names,
)
from pandas.core.indexes.range import RangeIndex
from pandas.core.tools.timedeltas import to_timedelta
if TYPE_CHECKING:
from pandas import CategoricalIndex
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_T = TypeVar("_T", bound="DatetimeIndexOpsMixin")
_TDT = TypeVar("_TDT", bound="DatetimeTimedeltaMixin")
@inherit_names(
["inferred_freq", "_resolution_obj", "resolution"],
DatetimeLikeArrayMixin,
cache=True,
)
@inherit_names(["mean", "asi8", "freq", "freqstr"], DatetimeLikeArrayMixin)
class DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex):
"""
Common ops mixin to support a unified interface for datetime-like Index types.
"""
_is_numeric_dtype = False
_can_hold_strings = False
_data: DatetimeArray | TimedeltaArray | PeriodArray
freq: BaseOffset | None
freqstr: str | None
_resolution_obj: Resolution
# ------------------------------------------------------------------------
@cache_readonly
def hasnans(self) -> bool:
return self._data._hasna
def equals(self, other: Any) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
elif other.dtype.kind in ["f", "i", "u", "c"]:
return False
elif not isinstance(other, type(self)):
should_try = False
inferable = self._data._infer_matches
if other.dtype == object:
should_try = other.inferred_type in inferable
elif is_categorical_dtype(other.dtype):
other = cast("CategoricalIndex", other)
should_try = other.categories.inferred_type in inferable
if should_try:
try:
other = type(self)(other)
except (ValueError, TypeError, OverflowError):
# e.g.
# ValueError -> cannot parse str entry, or OutOfBoundsDatetime
# TypeError -> trying to convert IntervalIndex to DatetimeIndex
# OverflowError -> Index([very_large_timedeltas])
return False
if not is_dtype_equal(self.dtype, other.dtype):
# have different timezone
return False
return np.array_equal(self.asi8, other.asi8)
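# Illustration (not part of the pandas source): equals() will coerce an object-dtype Index
# of the same datetimes before comparing, but rejects numeric dtypes up front:
#   idx = pd.date_range("2020", periods=3)
#   idx.equals(idx.astype(object))   # True, converted back and compared via asi8
#   idx.equals(pd.Index([1, 2, 3]))  # False, numeric dtype kind is rejected early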
@Appender(Index.__contains__.__doc__)
def __contains__(self, key: Any) -> bool:
hash(key)
try:
self.get_loc(key)
except (KeyError, TypeError, ValueError):
return False
return True
def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(to_timedelta(tolerance).to_numpy())
return super()._convert_tolerance(tolerance, target)
# --------------------------------------------------------------------
# Rendering Methods
def format(
self,
name: bool = False,
formatter: Callable | None = None,
na_rep: str = "NaT",
date_format: str | None = None,
) -> list[str]:
"""
Render a string representation of the Index.
"""
header = []
if name:
header.append(
ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
if self.name is not None
else ""
)
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, na_rep=na_rep, date_format=date_format)
def _format_with_header(
self, header: list[str], na_rep: str = "NaT", date_format: str | None = None
) -> list[str]:
# matches base class except for whitespace padding and date_format
return header + list(
self._format_native_types(na_rep=na_rep, date_format=date_format)
)
@property
def _formatter_func(self):
return self._data._formatter()
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value).
"""
attrs = super()._format_attrs()
for attrib in self._attributes:
# iterating over _attributes prevents us from doing this for PeriodIndex
if attrib == "freq":
freq = self.freqstr
if freq is not None:
freq = repr(freq) # e.g. D -> 'D'
attrs.append(("freq", freq))
return attrs
@Appender(Index._summary.__doc__)
def _summary(self, name=None) -> str:
result = super()._summary(name=name)
if self.freq:
result += f"\nFreq: {self.freqstr}"
return result
# --------------------------------------------------------------------
# Indexing Methods
@final
def _can_partial_date_slice(self, reso: Resolution) -> bool:
# e.g. test_getitem_setitem_periodindex
# History of conversation GH#3452, GH#3931, GH#2369, GH#14826
return reso > self._resolution_obj
# NB: for DTI/PI, not TDI
def _parsed_string_to_bounds(self, reso: Resolution, parsed):
raise NotImplementedError
def _parse_with_reso(self, label: str):
# overridden by TimedeltaIndex
parsed, reso_str = parsing.parse_time_string(label, self.freq)
reso = Resolution.from_attrname(reso_str)
return parsed, reso
def _get_string_slice(self, key: str):
# overridden by TimedeltaIndex
parsed, reso = self._parse_with_reso(key)
try:
return self._partial_date_slice(reso, parsed)
except KeyError as err:
raise KeyError(key) from err
@final
def _partial_date_slice(
self,
reso: Resolution,
parsed: datetime,
):
"""
Parameters
----------
reso : Resolution
parsed : datetime
Returns
-------
slice or ndarray[intp]
"""
if not self._can_partial_date_slice(reso):
raise ValueError
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
vals = self._data._ndarray
unbox = self._data._unbox
if self.is_monotonic_increasing:
if len(self) and (
(t1 < self[0] and t2 < self[0]) or (t1 > self[-1] and t2 > self[-1])
):
# we are out of range
raise KeyError
# TODO: does this depend on being monotonic _increasing_?
# a monotonic (sorted) series can be sliced
left = vals.searchsorted(unbox(t1), side="left")
right = vals.searchsorted(unbox(t2), side="right")
return slice(left, right)
else:
lhs_mask = vals >= unbox(t1)
rhs_mask = vals <= unbox(t2)
# try to find the dates
return (lhs_mask & rhs_mask).nonzero()[0]
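# Editorial illustration (hedged, not from the original source): this is the machinery
# behind partial string indexing. Assuming pandas imported as pd:
#   ser = pd.Series(range(365), index=pd.date_range("2020-01-01", periods=365, freq="D"))
#   ser.loc["2020-01"]   # monotonic index -> resolved through the slice branch above
# A non-monotonic index (e.g. ser.iloc[::-1]) is expected to go through the
# boolean-mask branch and yield integer positions instead of a slice.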
def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
"""
If label is a string, cast it to scalar type according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'} or None
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ["loc", "getitem", None, lib.no_default]
self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound")
if isinstance(label, str):
try:
parsed, reso = self._parse_with_reso(label)
except ValueError as err:
# DTI -> parsing.DateParseError
# TDI -> 'unit abbreviation w/o a number'
# PI -> string cannot be parsed as datetime-like
raise self._invalid_indexer("slice", label) from err
lower, upper = self._parsed_string_to_bounds(reso, parsed)
return lower if side == "left" else upper
elif not isinstance(label, self._data._recognized_scalars):
raise self._invalid_indexer("slice", label)
return label
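# Editorial illustration (hedged, not from the original source): partial string slicing
# relies on the cast above. Assuming a daily DatetimeIndex covering 2020,
#   dti.slice_locs("2020-01", "2020-02")
# is expected to cast "2020-01" to the lower bound 2020-01-01 00:00:00 (side="left")
# and "2020-02" to the upper bound 2020-02-29 23:59:59.999999999 (side="right").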
# --------------------------------------------------------------------
# Arithmetic Methods
def shift(self: _T, periods: int = 1, freq=None) -> _T:
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.DatetimeIndex
Shifted index.
See Also
--------
Index.shift : Shift values of Index.
PeriodIndex.shift : Shift values of PeriodIndex.
"""
arr = self._data.view()
arr._freq = self.freq
result = arr._time_shift(periods, freq=freq)
return type(self)._simple_new(result, name=self.name)
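# Editorial usage sketch (hedged, not from the original source), assuming pandas as pd:
#   pd.date_range("2020-01-01", periods=3, freq="D").shift(2)
# is expected to give DatetimeIndex(['2020-01-03', '2020-01-04', '2020-01-05'], freq='D'),
# i.e. every element moved forward by two increments of the index's own freq.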
# --------------------------------------------------------------------
@doc(Index._maybe_cast_listlike_indexer)
def _maybe_cast_listlike_indexer(self, keyarr):
try:
res = self._data._validate_listlike(keyarr, allow_object=True)
except (ValueError, TypeError):
if not isinstance(keyarr, ExtensionArray):
# e.g. we don't want to cast DTA to ndarray[object]
res = com.asarray_tuplesafe(keyarr)
# TODO: com.asarray_tuplesafe shouldn't cast e.g. DatetimeArray
else:
res = keyarr
return Index(res, dtype=res.dtype)
class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin):
"""
Mixin class for methods shared by DatetimeIndex and TimedeltaIndex,
but not PeriodIndex
"""
_data: DatetimeArray | TimedeltaArray
_comparables = ["name", "freq"]
_attributes = ["name", "freq"]
# Compat for frequency inference, see GH#23789
_is_monotonic_increasing = Index.is_monotonic_increasing
_is_monotonic_decreasing = Index.is_monotonic_decreasing
_is_unique = Index.is_unique
_join_precedence = 10
def _with_freq(self, freq):
arr = self._data._with_freq(freq)
return type(self)._simple_new(arr, name=self._name)
def is_type_compatible(self, kind: str) -> bool:
warnings.warn(
f"{type(self).__name__}.is_type_compatible is deprecated and will be "
"removed in a future version.",
FutureWarning,
stacklevel=find_stack_level(),
)
return kind in self._data._infer_matches
@property
def values(self) -> np.ndarray:
# NB: For Datetime64TZ this is lossy
return self._data._ndarray
# --------------------------------------------------------------------
# Set Operation Methods
@cache_readonly
def _as_range_index(self) -> RangeIndex:
# Convert our i8 representations to RangeIndex
# Caller is responsible for checking isinstance(self.freq, Tick)
freq = cast(Tick, self.freq)
tick = freq.delta.value
rng = range(self[0].value, self[-1].value + tick, tick)
return RangeIndex(rng)
def _can_range_setop(self, other):
return isinstance(self.freq, Tick) and isinstance(other.freq, Tick)
def _wrap_range_setop(self, other, res_i8):
new_freq = None
if not len(res_i8):
# RangeIndex defaults to step=1, which we don't want.
new_freq = self.freq
elif isinstance(res_i8, RangeIndex):
new_freq = to_offset(Timedelta(res_i8.step))
# TODO: we cannot just do
# type(self._data)(res_i8.values, dtype=self.dtype, freq=new_freq)
# because test_setops_preserve_freq fails with _validate_frequency raising.
# This raising is incorrect, as 'on_freq' is incorrect. This will
# be fixed by GH#41493
res_values = res_i8.values.view(self._data._ndarray.dtype)
result = type(self._data)._simple_new(
res_values, dtype=self.dtype, freq=new_freq
)
return self._wrap_setop_result(other, result)
def _range_intersect(self, other, sort):
# Dispatch to RangeIndex intersection logic.
left = self._as_range_index
right = other._as_range_index
res_i8 = left.intersection(right, sort=sort)
return self._wrap_range_setop(other, res_i8)
def _range_union(self, other, sort):
# Dispatch to RangeIndex union logic.
left = self._as_range_index
right = other._as_range_index
res_i8 = left.union(right, sort=sort)
return self._wrap_range_setop(other, res_i8)
def _intersection(self, other: Index, sort=False) -> Index:
"""
intersection specialized to the case with matching dtypes and both non-empty.
"""
other = cast("DatetimeTimedeltaMixin", other)
if self._can_range_setop(other):
return self._range_intersect(other, sort=sort)
if not self._can_fast_intersect(other):
result = Index._intersection(self, other, sort=sort)
# We need to invalidate the freq because Index._intersection
# uses _shallow_copy on a view of self._data, which will preserve
# self.freq if we're not careful.
# At this point we should have result.dtype == self.dtype
# and type(result) is type(self._data)
result = self._wrap_setop_result(other, result)
return result._with_freq(None)._with_freq("infer")
else:
return self._fast_intersect(other, sort)
def _fast_intersect(self, other, sort):
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
# after sorting, the intersection always starts with the right index
# and ends with the index whose last element is smallest
end = min(left[-1], right[-1])
start = right[0]
if end < start:
result = self[:0]
else:
lslice = slice(*left.slice_locs(start, end))
result = left._values[lslice]
return result
def _can_fast_intersect(self: _T, other: _T) -> bool:
# Note: we only get here with len(self) > 0 and len(other) > 0
if self.freq is None:
return False
elif other.freq != self.freq:
return False
elif not self.is_monotonic_increasing:
# Because freq is not None, we must then be monotonic decreasing
return False
# this along with matching freqs ensure that we "line up",
# so intersection will preserve freq
# Note we are assuming away Ticks, as those go through _range_intersect
# GH#42104
return self.freq.n == 1
def _can_fast_union(self: _T, other: _T) -> bool:
# Assumes that type(self) == type(other), as per the annotation
# The ability to fast_union also implies that `freq` should be
# retained on union.
freq = self.freq
if freq is None or freq != other.freq:
return False
if not self.is_monotonic_increasing:
# Because freq is not None, we must then be monotonic decreasing
# TODO: do union on the reversed indexes?
return False
if len(self) == 0 or len(other) == 0:
# only reached via union_many
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
return (right_start == left_end + freq) or right_start in left
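# Editorial illustration (hedged, not from the original source): two daily ranges that
# merely adjoin satisfy the check above, e.g.
#   left  = pd.date_range("2020-01-01", "2020-01-03", freq="D")
#   right = pd.date_range("2020-01-04", "2020-01-06", freq="D")
# Here right[0] == left[-1] + left.freq, so left.union(right) can take the fast path and
# is expected to keep freq='D'.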
def _fast_union(self: _TDT, other: _TDT, sort=None) -> _TDT:
# Caller is responsible for ensuring self and other are non-empty
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
elif sort is False:
# TDIs are not in the "correct" order and we don't want
# to sort but want to remove overlaps
left, right = self, other
left_start = left[0]
loc = right.searchsorted(left_start, side="left")
right_chunk = right._values[:loc]
dates =
|
concat_compat((left._values, right_chunk))
|
pandas.core.dtypes.concat.concat_compat
|
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_timedelta64_dtype
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import CategoricalIndex, Series, Timedelta, Timestamp
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
IntervalArray,
PandasArray,
PeriodArray,
SparseArray,
TimedeltaArray,
)
class TestToIterable:
# test that we convert an iterable to python types
dtypes = [
("int8", int),
("int16", int),
("int32", int),
("int64", int),
("uint8", int),
("uint16", int),
("uint32", int),
("uint64", int),
("float16", float),
("float32", float),
("float64", float),
("datetime64[ns]", Timestamp),
("datetime64[ns, US/Eastern]", Timestamp),
("timedelta64[ns]", Timedelta),
]
@pytest.mark.parametrize("dtype, rdtype", dtypes)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable(self, index_or_series, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype, obj",
[
("object", object, "a"),
("object", int, 1),
("category", object, "a"),
("category", int, 1),
],
)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_iterable_object_and_category(
self, index_or_series, method, dtype, rdtype, obj
):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test if items yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype", dtypes + [("object", int), ("category", int)]
)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable_map(self, index_or_series, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
s = Series(vals)
assert s.dtype == "datetime64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz is None
assert res == exp
vals = [
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
]
s = Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz == exp.tz
assert res == exp
# timedelta
vals = [Timedelta("1 days"), Timedelta("2 days")]
s = Series(vals)
assert s.dtype == "timedelta64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timedelta)
assert res == exp
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = Series(vals)
assert s.dtype == "Period[M]"
for res, exp in zip(s, vals):
assert isinstance(res, pd.Period)
assert res.freq == "M"
assert res == exp
@pytest.mark.parametrize(
"array, expected_type, dtype",
[
(np.array([0, 1], dtype=np.int64), np.ndarray, "int64"),
(np.array(["a", "b"]), np.ndarray, "object"),
(pd.Categorical(["a", "b"]), pd.Categorical, "category"),
(
pd.DatetimeIndex(["2017", "2018"], tz="US/Central"),
DatetimeArray,
"datetime64[ns, US/Central]",
),
(
pd.PeriodIndex([2018, 2019], freq="A"),
PeriodArray,
pd.core.dtypes.dtypes.PeriodDtype("A-DEC"),
),
(pd.IntervalIndex.from_breaks([0, 1, 2]), IntervalArray, "interval",),
# This test is currently failing for datetime64[ns] and timedelta64[ns].
# The NumPy type system is sufficient for representing these types, so
# we just use NumPy for Series / DataFrame columns of these types (so
# we get consolidation and so on).
# However, DatetimeIndex and TimedeltaIndex use the DateLikeArray
# abstraction for code reuse.
# At the moment, we've judged that allowing this test to fail is more
# practical than overriding Series._values to special case
# Series[M8[ns]] and Series[m8[ns]] to return a DateLikeArray.
pytest.param(
pd.DatetimeIndex(["2017", "2018"]),
np.ndarray,
"datetime64[ns]",
marks=[pytest.mark.xfail(reason="datetime _values", strict=True)],
),
pytest.param(
pd.TimedeltaIndex([10 ** 10]),
np.ndarray,
"m8[ns]",
marks=[pytest.mark.xfail(reason="timedelta _values", strict=True)],
),
],
)
def test_values_consistent(array, expected_type, dtype):
l_values = pd.Series(array)._values
r_values = pd.Index(array)._values
assert type(l_values) is expected_type
assert type(l_values) is type(r_values)
tm.assert_equal(l_values, r_values)
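# Editorial note (hedged): the xfail parameters above encode that, for tz-naive
# datetime64[ns]/timedelta64[ns] data, Series._values and Index._values do not return
# the same type at this point in pandas' development; comparing
#   type(pd.Series(pd.DatetimeIndex(["2017", "2018"]))._values)
#   type(pd.Index(pd.DatetimeIndex(["2017", "2018"]))._values)
# is one way to see the mismatch these parameters guard against.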
@pytest.mark.parametrize(
"array, expected",
[
(np.array([0, 1], dtype=np.int64), np.array([0, 1], dtype=np.int64)),
(np.array(["0", "1"]), np.array(["0", "1"], dtype=object)),
(pd.Categorical(["a", "a"]), np.array([0, 0], dtype="int8")),
(
pd.DatetimeIndex(["2017-01-01T00:00:00"]),
np.array(["2017-01-01T00:00:00"], dtype="M8[ns]"),
),
(
pd.DatetimeIndex(["2017-01-01T00:00:00"], tz="US/Eastern"),
np.array(["2017-01-01T05:00:00"], dtype="M8[ns]"),
),
(pd.TimedeltaIndex([10 ** 10]), np.array([10 ** 10], dtype="m8[ns]")),
(
pd.PeriodIndex(["2017", "2018"], freq="D"),
np.array([17167, 17532], dtype=np.int64),
),
],
)
def test_ndarray_values(array, expected):
l_values = pd.Series(array)._ndarray_values
r_values = pd.Index(array)._ndarray_values
tm.assert_numpy_array_equal(l_values, r_values)
tm.assert_numpy_array_equal(l_values, expected)
@pytest.mark.parametrize("arr", [np.array([1, 2, 3])])
def test_numpy_array(arr):
ser = pd.Series(arr)
result = ser.array
expected = PandasArray(arr)
tm.assert_extension_array_equal(result, expected)
def test_numpy_array_all_dtypes(any_numpy_dtype):
ser = pd.Series(dtype=any_numpy_dtype)
result = ser.array
if is_datetime64_dtype(any_numpy_dtype):
assert isinstance(result, DatetimeArray)
elif is_timedelta64_dtype(any_numpy_dtype):
assert isinstance(result, TimedeltaArray)
else:
assert isinstance(result, PandasArray)
@pytest.mark.parametrize(
"array, attr",
[
(pd.Categorical(["a", "b"]), "_codes"),
(pd.core.arrays.period_array(["2000", "2001"], freq="D"), "_data"),
(pd.core.arrays.integer_array([0, np.nan]), "_data"),
(IntervalArray.from_breaks([0, 1]), "_left"),
(SparseArray([0, 1]), "_sparse_values"),
(DatetimeArray(np.array([1, 2], dtype="datetime64[ns]")), "_data"),
# tz-aware Datetime
(
DatetimeArray(
np.array(
["2000-01-01T12:00:00", "2000-01-02T12:00:00"], dtype="M8[ns]"
),
dtype=DatetimeTZDtype(tz="US/Central"),
),
"_data",
),
],
)
def test_array(array, attr, index_or_series):
box = index_or_series
if array.dtype.name in ("Int64", "Sparse[int64, 0]") and box is pd.Index:
pytest.skip(f"No index type for {array.dtype}")
result = box(array, copy=False).array
if attr:
array = getattr(array, attr)
result = getattr(result, attr)
assert result is array
def test_array_multiindex_raises():
idx = pd.MultiIndex.from_product([["A"], ["a", "b"]])
with pytest.raises(ValueError, match="MultiIndex"):
idx.array
@pytest.mark.parametrize(
"array, expected",
[
(np.array([1, 2], dtype=np.int64), np.array([1, 2], dtype=np.int64)),
(pd.Categorical(["a", "b"]), np.array(["a", "b"], dtype=object)),
(
pd.core.arrays.period_array(["2000", "2001"], freq="D"),
np.array([pd.Period("2000", freq="D"), pd.Period("2001", freq="D")]),
),
(
pd.core.arrays.integer_array([0, np.nan]),
np.array([0, pd.NA], dtype=object),
),
(
IntervalArray.from_breaks([0, 1, 2]),
np.array([pd.Interval(0, 1),
|
pd.Interval(1, 2)
|
pandas.Interval
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
preprocessing_yesno.py
Created on Thu May 3 00:15:48 2018
This code:
- applies zero padding of 48,000 samples (default) to the start of each file
- saves [mu-law encoded audio] to <out_filedir>/mulaw
- saves [mfcc] to <out_filedir>/mfcc
- NOT IMPLEMENTED YET: [f0] to <out_filedir>/f0 *
@author: sungkyun
"""
import argparse
import numpy as np
import pandas as pd # required for generating .csv files
import librosa # required for audio pre-processing, loading mp3 (sudo apt-get install libav-tools)
import glob, os # required for obtaining test file ID
from util.utils import mu_law_encode
#%% Argument Parser
parser = argparse.ArgumentParser(description='Audio Preprocessing for yesno dataset')
parser.add_argument('-sr', '--sr', type=int, default=16000, metavar='N',
help='target sampling rate, default 16000')
parser.add_argument('-zp', '--zero_pad', type=int, default=48000, metavar='N',
help='number of zero-padding samples prepended to each file, default 48000')
parser.add_argument('-i', '--input_filedir', type=str, default='data/waves_yesno/', metavar='N',
help='input source dataset directory, default=data/waves_yesno/')
parser.add_argument('-o', '--out_filedir', type=str, default='data/processed_yesno/', metavar='N',
help='output file directory(root of .wav subdirectories and .csv file), default=data/processed_yesno/')
args = parser.parse_args()
input_file_dir = args.input_filedir
output_file_dir = args.out_filedir
#%% Function def.
def displayFeat(x_spec: np.ndarray):
import matplotlib.pyplot as plt
import librosa.display
plt.figure(figsize=(10, 4))
librosa.display.specshow(x_spec, x_axis='time')
return 0
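# Hedged reference sketch (added editorially): the script relies on util.utils.mu_law_encode,
# which is not shown here. The helper below is an assumed, standard 8-bit mu-law companding
# formulation (as used in WaveNet), kept only for illustration; the project's own
# implementation remains authoritative and this function is not used by the pipeline below.
def mu_law_encode_reference(x, mu=255):
    x = np.asarray(x, dtype=np.float64)
    peak = np.max(np.abs(x)) or 1.0  # avoid division by zero on silent input
    x = np.clip(x / peak, -1.0, 1.0)  # scale to [-1, 1]
    compressed = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)  # companding
    return np.round((compressed + 1.0) / 2.0 * mu).astype('uint8')  # quantize to [0, mu]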
#%% Preprocessing --> save <u-enc> <mfcc> as .npy
input_file_paths = sorted(glob.glob(input_file_dir + '*.wav'))
file_ids = [path.split('/')[-1][:-4] for path in input_file_paths]
# Load audio -> mono -> resample -> mfcc -> save
if not os.path.exists(output_file_dir):
os.makedirs(output_file_dir)
if not os.path.exists(output_file_dir + 'mulaw/'):
os.makedirs(output_file_dir + 'mulaw/')
if not os.path.exists(output_file_dir + 'mfcc/'):
os.makedirs(output_file_dir + 'mfcc/')
total_input_files = len(input_file_paths)
for i in range(total_input_files):
x_raw, sr = librosa.load(input_file_paths[i], sr=args.sr, mono=True) # Normalize?
x_raw = np.pad(x_raw, (args.zero_pad,0), mode='constant') # padding first 48,000 samples with zeros
#x_spec = librosa.feature.melspectrogram(y=x_raw, sr=sr, power=2.0, n_fft = 400, hop_length=160, n_mels=128)
x_spec = librosa.feature.melspectrogram(y=x_raw, sr=sr, power=2.0, n_fft = 400, hop_length=1, n_mels=128)
x_mfcc = librosa.feature.mfcc(S=librosa.power_to_db(x_spec), sr=args.sr, n_mfcc=25)
# displayFeat(x_spec); displayFeat(x_mfcc)
if x_mfcc.shape[1] > len(x_raw):
x_mfcc = x_mfcc[:,0:len(x_raw)]
elif x_mfcc.shape[1] < len(x_raw):
x_raw = x_raw[0:x_mfcc.shape[1]]
x_mulaw = mu_law_encode(x_raw)
# Save mulaw
save_file_path_mulaw = output_file_dir + 'mulaw/' + file_ids[i] + '.npy'
np.save(save_file_path_mulaw, x_mulaw.astype('uint8'))
# Save mfcc
save_file_path_mfcc = output_file_dir + 'mfcc/' + file_ids[i] + '.npy'
np.save(save_file_path_mfcc, x_mfcc)
print('Preprocessing: {} files completed.'.format(total_input_files))
#%% Train/test split --> generate .csv
# Train/test split : 54 files for train, 6 files for test
test_id_sel = [5,11,22,38,43,55]
train_id_sel = list(set(range(60)).difference(set(test_id_sel)))
# Prepare pandas dataframes
df_test =
|
pd.DataFrame(columns=('file_id', 'mulaw_filepath', 'mfcc_filepath'))
|
pandas.DataFrame
|
"""Load a model and evaluate its performance against an unknown test set"""
import glob
import logging
import os
import re
import sqlite3
from pathlib import Path
import configargparse
import keras.models
import numpy as np
import pandas
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import classification_report, confusion_matrix
IMG_DIM = (201, 201)
def evaluate(
model_file: str,
test_dir: str,
output_dir: str,
sample_lable_lst: str,
slices_per_structure: int = 60,
rgb: bool = False,
):
# Load model
try:
model = keras.models.load_model(model_file)
except Exception:
logging.error(f"Failed to load model from {model_file}")
raise
logging.info(f"Model loaded from {model_file}")
# Get test files prepared
# Load files
try:
test_files = glob.glob(f"{test_dir}/*")
logging.info(f"Found {len(test_files)} files for testing")
assert len(test_files) > 0, f"Could not find files at {test_dir}"
assert (
len(test_files) % slices_per_structure == 0
), f"Number of test files is not an exact multiple of slices per structure"
except AssertionError as e:
logging.error(e)
raise
except Exception as e:
logging.error(e)
raise
# Read table into pandas dataframe
# Load data CSV file with filenames and labels
print(sample_lable_lst)
data =
|
pandas.read_csv(sample_lable_lst)
|
pandas.read_csv
|
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to origin of copy does not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
|
pd.timedelta_range("1 day", periods=3)
|
pandas.timedelta_range
|
import os, pathlib
import random
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from scipy import io
import scipy.io.wavfile
from tensorflow.keras import layers
from tensorflow.keras import models
from ctypes import cdll, c_short, POINTER
def get_waveform(file_path):
_, waveform = scipy.io.wavfile.read(file_path)
waveform = np.round(waveform)
waveform = np.clip(waveform, -32768, 32767)
return waveform
def interval_fix_fft(w, step, m, n_fft_features, fpath='libraries/fix_fft_32k_dll/fix_fft_32k.so'):
ff = cdll.LoadLibrary(fpath)
ff.fix_fft.argtypes = [POINTER(c_short), POINTER(c_short), c_short, c_short]
nsteps = len(w) // step
w = tf.cast(w, tf.int32)
intervals = np.split(w, nsteps)
def fix_fft(re):
im = [0 for _ in range(step)]
re_c = (c_short * step)(*re)
im_c = (c_short * step)(*im)
ff.fix_fft(re_c, im_c, c_short(m), c_short(0))
s = np.zeros(n_fft_features)
for i in range(n_fft_features):
s[i] = np.round(np.sqrt(re_c[i] * re_c[i] + im_c[i] * im_c[i]) // 2)
return s
#print(intervals[0])
mgn = map(fix_fft, intervals)
#print(intervals[0])
#print('------------------------------')
return np.hstack(mgn)
def get_spectrogram(waveform, input_len=15232): # 15872
waveform = waveform[:input_len]
zero_padding = np.zeros(input_len - len(waveform))
equal_length = np.hstack([waveform, zero_padding])
#spectrogram = interval_fix_fft(equal_length, 512, 6, 33)
spectrogram = interval_fix_fft(equal_length, 2176, 6, 33)
return spectrogram
def equalize_numbers(X, Y):
ulabels = np.unique(Y)
uids = [np.where(Y == ul)[0] for ul in ulabels]
un = [len(uidx) for uidx in uids]
nl = np.max(un)
X_new = np.vstack([X[np.random.choice(uidx, nl), :] for uidx in uids])
Y_new = np.hstack([Y[np.random.choice(uidx, nl)] for uidx in uids])
return X_new, Y_new
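# Hedged illustration (added editorially): a floating-point analogue of what
# interval_fix_fft computes per interval, using numpy's FFT instead of the fixed-point
# C routine. The fixed-point library applies its own internal scaling, so absolute
# magnitudes will differ; this sketch is for intuition only and is not called below.
def float_fft_magnitudes(w, step=2176, n_fft_features=33):
    w = np.asarray(w, dtype=np.float64)
    nsteps = len(w) // step
    intervals = np.split(w[:nsteps * step], nsteps)  # non-overlapping frames of `step` samples
    mags = [np.abs(np.fft.fft(chunk))[:n_fft_features] for chunk in intervals]  # magnitude spectrum
    return np.hstack(mags)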
if __name__ == '__main__':
labels = ['no', 'yes', 'other']
# seed
seed = 42
tf.random.set_seed(seed)
np.random.seed(seed)
# download data
DATASET_PATH = 'data/mini_speech_commands'
data_dir = pathlib.Path(DATASET_PATH)
if not data_dir.exists():
tf.keras.utils.get_file(
'mini_speech_commands.zip',
origin="http://storage.googleapis.com/download.tensorflow.org/data/mini_speech_commands.zip",
extract=True,
cache_dir='.', cache_subdir='data'
)
# save labels
commands = [item for item in os.listdir(data_dir) if os.path.isdir(data_dir.joinpath(item))]
assert labels[-1] not in commands
print('Labels:', labels)
with open(f'{DATASET_PATH}/labels.txt', 'w') as f:
f.write(','.join(labels))
# fpath
features_fpath = f'{DATASET_PATH}/features_nano.csv'
minmax_fpath = f'{DATASET_PATH}/minmax_nano.csv'
silence_fpath = f'{DATASET_PATH}/silence.csv'
# preprocess data
try:
features_and_labels =
|
pd.read_csv(features_fpath, header=None)
|
pandas.read_csv
|
import os
import requests
import pandas as pd
from pandas.core.frame import DataFrame
from lxml import etree
from bs4 import BeautifulSoup as BS4
# Lifetables
# The lifetables data is available as HTML files on the SSA website. We will scrape the data,
# parse it, combine all lifetables into a single dataframe, and save it as a CSV file.
def parse_html_table(table):
"""
Parse the table, keeping only rows whose child 'div' elements with class 'pCellBodyR' contain data (14 data cells per row)
"""
trs = table.find_all('tr')
print(len(trs))
my_table = []
for tr in trs:
testRow = etree.HTML(str(tr))
# Catch row containing div containing data only
dataDivs = testRow.xpath('//*[@class=\'pCellBodyR\']')
repr(dataDivs)
if len(dataDivs) == 14:
my_row = []
for dataDiv in dataDivs:
value = dataDiv.text.replace('\n','').replace(',', '')
if int(float(value)) == float(value):
my_row.append(int(float(value)))
else:
my_row.append(float(value))
my_table.append(my_row)
return pd.DataFrame(my_table)
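# Hedged usage sketch (added editorially, not part of the original script): fetch one
# lifetable page and parse its first table. The URL is a placeholder assumption, not a
# verified SSA endpoint; this helper is not called anywhere below.
def fetch_and_parse_example(url="https://www.ssa.gov/oact/NOTES/as120/LifeTables_Tbl_7_1900.html"):  # hypothetical URL
    resp = requests.get(url)
    soup = BS4(resp.text, 'html.parser')
    table = soup.find('table')  # assumes the lifetable is the first <table> on the page
    return parse_html_table(table)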
def parse_lifetable(lifetable: DataFrame, year):
"""
Parse extracted lifetable into a pandas dataframe
"""
lifetable_male = lifetable.iloc[:,:7].assign(sex = 'M')
df_male =
|
pd.DataFrame(lifetable_male)
|
pandas.DataFrame
|
"""
Quantilization functions and related stuff
"""
from functools import partial
import numpy as np
from pandas._libs.lib import infer_dtype
from pandas.core.dtypes.common import (
ensure_int64, is_categorical_dtype, is_datetime64_dtype,
is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_integer,
is_scalar, is_timedelta64_dtype)
from pandas.core.dtypes.missing import isna
from pandas import (
Categorical, Index, Interval, IntervalIndex, Series, Timedelta, Timestamp,
to_datetime, to_timedelta)
import pandas.core.algorithms as algos
import pandas.core.nanops as nanops
def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
include_lowest=False, duplicates='raise'):
"""
Bin values into discrete intervals.
Use `cut` when you need to segment and sort data values into bins. This
function is also useful for going from a continuous variable to a
categorical variable. For example, `cut` could convert ages to groups of
age ranges. Supports binning into an equal number of bins, or a
pre-specified array of bins.
Parameters
----------
x : array-like
The input array to be binned. Must be 1-dimensional.
bins : int, sequence of scalars, or pandas.IntervalIndex
The criteria to bin by.
* int : Defines the number of equal-width bins in the range of `x`. The
range of `x` is extended by .1% on each side to include the minimum
and maximum values of `x`.
* sequence of scalars : Defines the bin edges allowing for non-uniform
width. No extension of the range of `x` is done.
* IntervalIndex : Defines the exact bins to be used.
right : bool, default True
Indicates whether `bins` includes the rightmost edge or not. If
``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]``
indicate (1,2], (2,3], (3,4]. This argument is ignored when
`bins` is an IntervalIndex.
labels : array or bool, optional
Specifies the labels for the returned bins. Must be the same length as
the resulting bins. If False, returns only integer indicators of the
bins. This affects the type of the output container (see below).
This argument is ignored when `bins` is an IntervalIndex.
retbins : bool, default False
Whether to return the bins or not. Useful when bins is provided
as a scalar.
precision : int, default 3
The precision at which to store and display the bins labels.
include_lowest : bool, default False
Whether the first interval should be left-inclusive or not.
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.23.0
Returns
-------
out : pandas.Categorical, Series, or ndarray
An array-like object representing the respective bin for each value
of `x`. The type depends on the value of `labels`.
* True (default) : returns a Series for Series `x` or a
pandas.Categorical for all other inputs. The values stored within
are Interval dtype.
* sequence of scalars : returns a Series for Series `x` or a
pandas.Categorical for all other inputs. The values stored within
are whatever the type in the sequence is.
* False : returns an ndarray of integers.
bins : numpy.ndarray or IntervalIndex.
The computed or specified bins. Only returned when `retbins=True`.
For scalar or sequence `bins`, this is an ndarray with the computed
bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For
an IntervalIndex `bins`, this is equal to `bins`.
See Also
--------
qcut : Discretize variable into equal-sized buckets based on rank
or based on sample quantiles.
pandas.Categorical : Array type for storing data that come from a
fixed set of values.
Series : One-dimensional array with axis labels (including time series).
pandas.IntervalIndex : Immutable Index implementing an ordered,
sliceable set.
Notes
-----
Any NA values will be NA in the result. Out of bounds values will be NA in
the resulting Series or pandas.Categorical object.
Examples
--------
Discretize into three equal-sized bins.
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3)
... # doctest: +ELLIPSIS
[(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True)
... # doctest: +ELLIPSIS
([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...
array([0.994, 3. , 5. , 7. ]))
Discovers the same bins, but assigns them specific labels. Notice that
the returned Categorical's categories are `labels` and that it is ordered.
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]),
... 3, labels=["bad", "medium", "good"])
[bad, good, medium, medium, good, bad]
Categories (3, object): [bad < medium < good]
``labels=False`` implies you just want the bins back.
>>> pd.cut([0, 1, 1, 2], bins=4, labels=False)
array([0, 1, 1, 3])
Passing a Series as an input returns a Series with categorical dtype:
>>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
... index=['a', 'b', 'c', 'd', 'e'])
>>> pd.cut(s, 3)
... # doctest: +ELLIPSIS
a (1.992, 4.667]
b (1.992, 4.667]
c (4.667, 7.333]
d (7.333, 10.0]
e (7.333, 10.0]
dtype: category
Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ...
Passing a Series as an input returns a Series with the mapped values;
it can be used to map values numerically to intervals based on `bins`.
>>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
... index=['a', 'b', 'c', 'd', 'e'])
>>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False)
... # doctest: +ELLIPSIS
(a 0.0
b 1.0
c 2.0
d 3.0
e 4.0
dtype: float64, array([0, 2, 4, 6, 8]))
Use the `duplicates='drop'` option when bin edges are not unique
>>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True,
... right=False, duplicates='drop')
... # doctest: +ELLIPSIS
(a 0.0
b 1.0
c 2.0
d 3.0
e 3.0
dtype: float64, array([0, 2, 4, 6, 8]))
Passing an IntervalIndex for `bins` results in those categories exactly.
Notice that values not covered by the IntervalIndex are set to NaN. 0
is to the left of the first bin (which is closed on the right), and 1.5
falls between two bins.
>>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
>>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins)
[NaN, (0, 1], NaN, (2, 3], (4, 5]]
Categories (3, interval[int64]): [(0, 1] < (2, 3] < (4, 5]]
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
# for handling the cut for datetime and timedelta objects
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
try: # for array-like
sz = x.size
except AttributeError:
x = np.asarray(x)
sz = x.size
if sz == 0:
raise ValueError('Cannot cut empty array')
rng = (nanops.nanmin(x),
|
nanops.nanmax(x)
|
pandas.core.nanops.nanmax
|
from typing import Tuple
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from etna.datasets import generate_ar_df
from etna.datasets.tsdataset import TSDataset
from etna.transforms import DateFlagsTransform
from etna.transforms import TimeSeriesImputerTransform
@pytest.fixture()
def tsdf_with_exog(random_seed) -> TSDataset:
df_1 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-02-01", "2021-07-01", freq="1d")})
df_2 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-02-01", "2021-07-01", freq="1d")})
df_1["segment"] = "Moscow"
df_1["target"] = [x ** 2 + np.random.uniform(-2, 2) for x in list(range(len(df_1)))]
df_2["segment"] = "Omsk"
df_2["target"] = [x ** 0.5 + np.random.uniform(-2, 2) for x in list(range(len(df_2)))]
classic_df = pd.concat([df_1, df_2], ignore_index=True)
df = classic_df.pivot(index="timestamp", columns="segment")
df = df.reorder_levels([1, 0], axis=1)
df = df.sort_index(axis=1)
df.columns.names = ["segment", "feature"]
exog = generate_ar_df(start_time="2021-01-01", periods=600, n_segments=2)
exog = exog.pivot(index="timestamp", columns="segment")
exog = exog.reorder_levels([1, 0], axis=1)
exog = exog.sort_index(axis=1)
exog.columns.names = ["segment", "feature"]
exog.columns = pd.MultiIndex.from_arrays([["Moscow", "Omsk"], ["exog", "exog"]])
ts = TSDataset(df=df, df_exog=exog, freq="1D")
return ts
@pytest.fixture()
def df_and_regressors() -> Tuple[pd.DataFrame, pd.DataFrame]:
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df_1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "target": 12, "segment": "2"})
df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(df)
timestamp = pd.date_range("2020-12-01", "2021-02-11")
df_1 = pd.DataFrame({"timestamp": timestamp, "regressor_1": 1, "regressor_2": 2, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "regressor_1": 3, "regressor_2": 4, "segment": "2"})
df_exog = pd.concat([df_1, df_2], ignore_index=True)
df_exog = TSDataset.to_dataset(df_exog)
return df, df_exog
@pytest.fixture()
def ts_future(example_reg_tsds):
future = example_reg_tsds.make_future(10)
return future
def test_check_endings_error_raise():
"""Check that _check_endings method raises exception if some segments end with nan."""
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp[:-5], "target": 12, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
df = TSDataset.to_dataset(df)
ts = TSDataset(df=df, freq="D")
with pytest.raises(ValueError):
ts._check_endings()
def test_check_endings_error_pass():
"""Check that _check_endings method passes if there is no nans at the end of all segments."""
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp, "target": 12, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
df = TSDataset.to_dataset(df)
ts = TSDataset(df=df, freq="D")
ts._check_endings()
def test_categorical_after_call_to_pandas():
classic_df = generate_ar_df(periods=30, start_time="2021-06-01", n_segments=2)
classic_df["categorical_column"] = [0] * 30 + [1] * 30
classic_df["categorical_column"] = classic_df["categorical_column"].astype("category")
df = TSDataset.to_dataset(classic_df[["timestamp", "segment", "target"]])
exog = TSDataset.to_dataset(classic_df[["timestamp", "segment", "categorical_column"]])
ts = TSDataset(df, "D", exog)
flatten_df = ts.to_pandas(flatten=True)
assert flatten_df["categorical_column"].dtype == "category"
@pytest.mark.parametrize(
"borders, true_borders",
(
(
("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01"),
("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01"),
),
(
("2021-02-03", "2021-06-20", "2021-06-22", "2021-07-01"),
("2021-02-03", "2021-06-20", "2021-06-22", "2021-07-01"),
),
(
("2021-02-01", "2021-06-20", "2021-06-21", "2021-06-28"),
("2021-02-01", "2021-06-20", "2021-06-21", "2021-06-28"),
),
(
("2021-02-01", "2021-06-20", "2021-06-23", "2021-07-01"),
("2021-02-01", "2021-06-20", "2021-06-23", "2021-07-01"),
),
((None, "2021-06-20", "2021-06-23", "2021-06-28"), ("2021-02-01", "2021-06-20", "2021-06-23", "2021-06-28")),
(("2021-02-03", "2021-06-20", "2021-06-23", None), ("2021-02-03", "2021-06-20", "2021-06-23", "2021-07-01")),
((None, "2021-06-20", "2021-06-23", None), ("2021-02-01", "2021-06-20", "2021-06-23", "2021-07-01")),
((None, "2021-06-20", None, None), ("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01")),
((None, None, "2021-06-21", None), ("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01")),
),
)
def test_train_test_split(borders, true_borders, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
train_start_true, train_end_true, test_start_true, test_end_true = true_borders
train, test = tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end
)
assert isinstance(train, TSDataset)
assert isinstance(test, TSDataset)
assert (train.df == tsdf_with_exog.df[train_start_true:train_end_true]).all().all()
assert (train.df_exog == tsdf_with_exog.df_exog).all().all()
assert (test.df == tsdf_with_exog.df[test_start_true:test_end_true]).all().all()
assert (test.df_exog == tsdf_with_exog.df_exog).all().all()
@pytest.mark.parametrize(
"test_size, true_borders",
(
(11, ("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01")),
(9, ("2021-02-01", "2021-06-22", "2021-06-23", "2021-07-01")),
(1, ("2021-02-01", "2021-06-30", "2021-07-01", "2021-07-01")),
),
)
def test_train_test_split_with_test_size(test_size, true_borders, tsdf_with_exog):
train_start_true, train_end_true, test_start_true, test_end_true = true_borders
train, test = tsdf_with_exog.train_test_split(test_size=test_size)
assert isinstance(train, TSDataset)
assert isinstance(test, TSDataset)
assert (train.df == tsdf_with_exog.df[train_start_true:train_end_true]).all().all()
assert (train.df_exog == tsdf_with_exog.df_exog).all().all()
assert (test.df == tsdf_with_exog.df[test_start_true:test_end_true]).all().all()
assert (test.df_exog == tsdf_with_exog.df_exog).all().all()
@pytest.mark.parametrize(
"test_size, borders, true_borders",
(
(
10,
("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01"),
("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01"),
),
(
15,
("2021-02-03", "2021-06-20", "2021-06-22", "2021-07-01"),
("2021-02-03", "2021-06-20", "2021-06-22", "2021-07-01"),
),
(11, ("2021-02-02", None, None, "2021-06-28"), ("2021-02-02", "2021-06-17", "2021-06-18", "2021-06-28")),
(
4,
("2021-02-03", "2021-06-20", None, "2021-07-01"),
("2021-02-03", "2021-06-20", "2021-06-28", "2021-07-01"),
),
(
4,
("2021-02-03", "2021-06-20", None, None),
("2021-02-03", "2021-06-20", "2021-06-21", "2021-06-24"),
),
),
)
def test_train_test_split_both(test_size, borders, true_borders, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
train_start_true, train_end_true, test_start_true, test_end_true = true_borders
train, test = tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end, test_size=test_size
)
assert isinstance(train, TSDataset)
assert isinstance(test, TSDataset)
assert (train.df == tsdf_with_exog.df[train_start_true:train_end_true]).all().all()
assert (train.df_exog == tsdf_with_exog.df_exog).all().all()
assert (test.df == tsdf_with_exog.df[test_start_true:test_end_true]).all().all()
assert (test.df_exog == tsdf_with_exog.df_exog).all().all()
@pytest.mark.parametrize(
"borders, match",
(
(("2021-01-01", "2021-06-20", "2021-06-21", "2021-07-01"), "Min timestamp in df is"),
(("2021-02-01", "2021-06-20", "2021-06-21", "2021-08-01"), "Max timestamp in df is"),
),
)
def test_train_test_split_warning(borders, match, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
with pytest.warns(UserWarning, match=match):
tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end
)
@pytest.mark.parametrize(
"test_size, borders, match",
(
(
10,
("2021-02-01", None, "2021-06-21", "2021-07-01"),
"test_size, test_start and test_end cannot be applied at the same time. test_size will be ignored",
),
),
)
def test_train_test_split_warning2(test_size, borders, match, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
with pytest.warns(UserWarning, match=match):
tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end, test_size=test_size
)
@pytest.mark.parametrize(
"test_size, borders, match",
(
(
None,
("2021-02-03", None, None, "2021-07-01"),
"At least one of train_end, test_start or test_size should be defined",
),
(
17,
("2021-02-01", "2021-06-20", None, "2021-07-01"),
"The beginning of the test goes before the end of the train",
),
(
17,
("2021-02-01", "2021-06-20", "2021-06-26", None),
"test_size is 17, but only 6 available with your test_start",
),
),
)
def test_train_test_split_failed(test_size, borders, match, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
with pytest.raises(ValueError, match=match):
tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end, test_size=test_size
)
def test_dataset_datetime_conversion():
classic_df = generate_ar_df(periods=30, start_time="2021-06-01", n_segments=2)
classic_df["timestamp"] = classic_df["timestamp"].astype(str)
df = TSDataset.to_dataset(classic_df[["timestamp", "segment", "target"]])
# todo: deal with pandas datetime format
assert df.index.dtype == "datetime64[ns]"
def test_dataset_datetime_conversion_during_init():
classic_df = generate_ar_df(periods=30, start_time="2021-06-01", n_segments=2)
classic_df["categorical_column"] = [0] * 30 + [1] * 30
classic_df["categorical_column"] = classic_df["categorical_column"].astype("category")
df = TSDataset.to_dataset(classic_df[["timestamp", "segment", "target"]])
exog = TSDataset.to_dataset(classic_df[["timestamp", "segment", "categorical_column"]])
df.index = df.index.astype(str)
exog.index = df.index.astype(str)
ts = TSDataset(df, "D", exog)
assert ts.df.index.dtype == "datetime64[ns]"
def test_make_future_raise_error_on_diff_endings(ts_diff_endings):
with pytest.raises(ValueError, match="All segments should end at the same timestamp"):
ts_diff_endings.make_future(10)
def test_make_future_with_imputer(ts_diff_endings, ts_future):
imputer = TimeSeriesImputerTransform(in_column="target")
ts_diff_endings.fit_transform([imputer])
future = ts_diff_endings.make_future(10)
assert_frame_equal(future.df, ts_future.df)
def test_make_future():
timestamp = pd.date_range("2020-01-01", periods=100, freq="D")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 1, "segment": "segment_1"})
df2 = pd.DataFrame({"timestamp": timestamp, "target": 2, "segment": "segment_2"})
df = pd.concat([df1, df2], ignore_index=False)
ts = TSDataset(TSDataset.to_dataset(df), freq="D")
ts_future = ts.make_future(10)
assert np.all(ts_future.index == pd.date_range(ts.index.max() + pd.Timedelta("1D"), periods=10, freq="D"))
assert set(ts_future.columns.get_level_values("feature")) == {"target"}
def test_make_future_small_horizon():
timestamp = np.arange(np.datetime64("2021-01-01"), np.datetime64("2021-02-01"))
target1 = [np.sin(i) for i in range(len(timestamp))]
target2 = [np.cos(i) for i in range(len(timestamp))]
df1 = pd.DataFrame({"timestamp": timestamp, "target": target1, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp, "target": target2, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
df = TSDataset.to_dataset(df)
ts = TSDataset(df, freq="D")
train = TSDataset(ts[: ts.index[10], :, :], freq="D")
with pytest.warns(UserWarning, match="TSDataset freq can't be inferred"):
assert len(train.make_future(1).df) == 1
def test_make_future_with_exog():
timestamp = pd.date_range("2020-01-01", periods=100, freq="D")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 1, "segment": "segment_1"})
df2 = pd.DataFrame({"timestamp": timestamp, "target": 2, "segment": "segment_2"})
df = pd.concat([df1, df2], ignore_index=False)
exog = df.copy()
exog.columns = ["timestamp", "exog", "segment"]
ts = TSDataset(df=TSDataset.to_dataset(df), df_exog=TSDataset.to_dataset(exog), freq="D")
ts_future = ts.make_future(10)
assert np.all(ts_future.index == pd.date_range(ts.index.max() + pd.Timedelta("1D"), periods=10, freq="D"))
assert set(ts_future.columns.get_level_values("feature")) == {"target", "exog"}
def test_make_future_with_regressors(df_and_regressors):
df, df_exog = df_and_regressors
ts = TSDataset(df=df, df_exog=df_exog, freq="D")
ts_future = ts.make_future(10)
assert np.all(ts_future.index == pd.date_range(ts.index.max() + pd.Timedelta("1D"), periods=10, freq="D"))
assert set(ts_future.columns.get_level_values("feature")) == {"target", "regressor_1", "regressor_2"}
@pytest.mark.parametrize("exog_starts_later,exog_ends_earlier", ((True, False), (False, True), (True, True)))
def test_dataset_check_exog_raise_error(exog_starts_later: bool, exog_ends_earlier: bool):
start_time = "2021-01-10" if exog_starts_later else "2021-01-01"
end_time = "2021-01-20" if exog_ends_earlier else "2021-02-01"
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp[5:], "target": 12, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
df = TSDataset.to_dataset(df)
timestamp = pd.date_range(start_time, end_time)
df1 = pd.DataFrame({"timestamp": timestamp, "regressor_aaa": 1, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp[5:], "regressor_aaa": 2, "segment": "2"})
dfexog = pd.concat([df1, df2], ignore_index=True)
dfexog = TSDataset.to_dataset(dfexog)
with pytest.raises(ValueError):
TSDataset._check_regressors(df=df, df_exog=dfexog)
def test_dataset_check_exog_pass(df_and_regressors):
df, df_exog = df_and_regressors
_ = TSDataset._check_regressors(df=df, df_exog=df_exog)
def test_warn_not_enough_exog(df_and_regressors):
"""Check that warning is thrown if regressors don't have enough values."""
df, df_exog = df_and_regressors
ts = TSDataset(df=df, df_exog=df_exog, freq="D")
with pytest.warns(UserWarning, match="Some regressors don't have enough values"):
ts.make_future(ts.df_exog.shape[0] + 100)
def test_getitem_only_date(tsdf_with_exog):
df_date_only = tsdf_with_exog["2021-02-01"]
assert df_date_only.name == pd.Timestamp("2021-02-01")
pd.testing.assert_series_equal(tsdf_with_exog.df.loc["2021-02-01"], df_date_only)
def test_getitem_slice_date(tsdf_with_exog):
df_slice = tsdf_with_exog["2021-02-01":"2021-02-03"]
expected_index = pd.DatetimeIndex(pd.date_range("2021-02-01", "2021-02-03"), name="timestamp")
pd.testing.assert_index_equal(df_slice.index, expected_index)
pd.testing.assert_frame_equal(tsdf_with_exog.df.loc["2021-02-01":"2021-02-03"], df_slice)
def test_getitem_second_ellipsis(tsdf_with_exog):
df_slice = tsdf_with_exog["2021-02-01":"2021-02-03", ...]
expected_index = pd.DatetimeIndex(pd.date_range("2021-02-01", "2021-02-03"), name="timestamp")
pd.testing.assert_index_equal(df_slice.index, expected_index)
pd.testing.assert_frame_equal(tsdf_with_exog.df.loc["2021-02-01":"2021-02-03"], df_slice)
def test_getitem_first_ellipsis(tsdf_with_exog):
df_slice = tsdf_with_exog[..., "target"]
df_expected = tsdf_with_exog.df.loc[:, [["Moscow", "target"], ["Omsk", "target"]]]
pd.testing.assert_frame_equal(df_expected, df_slice)
def test_getitem_all_indexes(tsdf_with_exog):
df_slice = tsdf_with_exog[:, :, :]
df_expected = tsdf_with_exog.df
pd.testing.assert_frame_equal(df_expected, df_slice)
def test_finding_regressors(df_and_regressors):
"""Check that ts.regressors property works correctly."""
df, df_exog = df_and_regressors
ts = TSDataset(df=df, df_exog=df_exog, freq="D")
assert sorted(ts.regressors) == ["regressor_1", "regressor_2"]
def test_head_default(tsdf_with_exog):
assert np.all(tsdf_with_exog.head() == tsdf_with_exog.df.head())
def test_tail_default(tsdf_with_exog):
assert np.all(tsdf_with_exog.tail() == tsdf_with_exog.df.tail())
def test_updating_regressors_fit_transform(df_and_regressors):
"""Check that ts.regressors is updated after making ts.fit_transform()."""
df, df_exog = df_and_regressors
ts = TSDataset(df=df, df_exog=df_exog, freq="D")
date_flags_transform = DateFlagsTransform(
day_number_in_week=True,
day_number_in_month=False,
week_number_in_month=False,
week_number_in_year=False,
month_number_in_year=False,
year_number=False,
is_weekend=True,
out_column="regressor_dateflag",
)
initial_regressors = set(ts.regressors)
ts.fit_transform(transforms=[date_flags_transform])
final_regressors = set(ts.regressors)
expected_columns = {"regressor_dateflag_day_number_in_week", "regressor_dateflag_is_weekend"}
assert initial_regressors.issubset(final_regressors)
assert final_regressors.difference(initial_regressors) == expected_columns
def test_right_format_sorting():
"""Need to check if to_dataset method does not mess up with data and column names,
sorting it with no respect to each other
"""
df = pd.DataFrame({"timestamp":
|
pd.date_range("2020-01-01", periods=100)
|
pandas.date_range
|
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.tslib as tslib
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import (DatetimeIndex, PeriodIndex, period_range, Series, Period,
_np_version_under1p10, Index, Timedelta, offsets)
from pandas.tests.test_base import Ops
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
|
pd.Period('2013-01-02', freq='D')
|
pandas.Period
|
# encoding: utf-8
# copyright: GeoDS Lab, University of Wisconsin-Madison
# authors: <NAME>, <NAME>, <NAME>
import pandas as pd
import numpy as np
import os
import geopandas as gpd
import argparse
import datetime
parser = argparse.ArgumentParser(description='Month, start day, end day')
parser.add_argument('--month', type=str, help='Month')
parser.add_argument('--day', type=int, help='Day')
args = parser.parse_args()
month = args.month
day = args.day
day_str = str(day).zfill(2)
# Read shp
cbgs_shp = gpd.read_file('cbg_us.shp')
ct_shp = gpd.read_file('ct_us.shp')
county_shp = gpd.read_file('county_us.shp')
state_shp = gpd.read_file('state_us.shp')
# Pop
pop_ct = pd.read_csv('../resources/pop_ct.csv', dtype={"ct":"object"})
pop_county = pd.read_csv('../resources/pop_county.csv', dtype={"county":"object"})
pop_state = pd.read_csv('../resources/pop_state.csv', dtype={"state":"object"})
# Iterate visit flows
cbg_visits = pd.read_csv(f'{month}/{day_str}/2020-{month}-{day_str}-social-distancing.csv.gz')
flows_unit = []
for i, row in enumerate(cbg_visits.itertuples()):
if row.destination_cbgs == "{}":
continue
else:
origin = row.origin_census_block_group
destination = eval(row.destination_cbgs)
for key, value in destination.items():
d = str(key).zfill(12)
v = value
o = str(origin).zfill(12)
flows_unit.append([o, d, v])
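# destination_cbgs is a dict-like string, e.g. '{"360470504002": 3}' (hypothetical values),
# so eval() above yields {destination_cbg: visitor_count} pairs; ast.literal_eval would be
# a safer drop-in for parsing such literals.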
cbg_visits_flow_all = pd.DataFrame(flows_unit, columns=["cbg_o", "cbg_d", "visitor_flows"])
cbg_visits_flow_all_geo = pd.merge(left=cbg_visits_flow_all, right=cbgs_shp[["cbg", "ct", "county_fip", "StateFIPS"]],
left_on="cbg_o", right_on="cbg")
cbg_visits_flow_all_geo = pd.merge(left=cbg_visits_flow_all_geo, right=cbgs_shp[["cbg", "ct", "county_fip", "StateFIPS"]],
left_on="cbg_d", right_on="cbg", suffixes=["__o", "__d"])
cbg_visits_flow_all_geo = cbg_visits_flow_all_geo.drop(["cbg__o", "cbg__d"], axis=1)
cbg_visits_flow_all_geo = cbg_visits_flow_all_geo.rename({"ct__o": "ct_o", "ct__d": "ct_d",
"StateFIPS__o": "state_o", "StateFIPS__d": "state_d",
"county_fip__o":"county_o", "county_fip__d":"county_d"}, axis=1)
# Export O-D flows
def export_od(cbg_visits_flow_all_geo, scale, od_shp):
if scale == "ct":
scale_field = scale
elif scale == "county":
scale_field = f"county_fip"
else:
scale_field = "StateFIPS"
od = cbg_visits_flow_all_geo.groupby([f"{scale}_o", f"{scale}_d"]).sum().reset_index()
od_shp["lng"] = od_shp["geometry"].centroid.x
od_shp["lat"] = od_shp["geometry"].centroid.y
od_all = pd.merge(left=od, left_on=[f"{scale}_o"],
right=od_shp[[f"{scale_field}", "lng", "lat"]], right_on=[f"{scale_field}"])
od_all = pd.merge(left=od_all, left_on=[f"{scale}_d"],
right=od_shp[[f"{scale_field}", "lng", "lat"]], right_on=[f"{scale_field}"],
suffixes=["__o", "__d"])
od_all = od_all.drop([f"{scale_field}__o", f"{scale_field}__d"], axis=1)
od_all = od_all.rename({"lng__o": "lng_o", "lat__o": "lat_o", "lng__d":"lng_d", "lat__d":"lat_d"}, axis=1)
return od_all
ct2ct_all = export_od(cbg_visits_flow_all_geo, "ct", ct_shp)
county2county_all = export_od(cbg_visits_flow_all_geo, "county", county_shp)
state2state_all = export_od(cbg_visits_flow_all_geo, "state", state_shp)
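# Each *2*_all frame now carries the origin/destination ids, the summed visitor_flows,
# and the matching centroid coordinates (lng_o, lat_o, lng_d, lat_d).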
# Num_devices
num_devices = cbg_visits[["origin_census_block_group", "device_count"]].copy()
num_devices["ct"] = num_devices["origin_census_block_group"].apply(lambda x: str(x).zfill(12)[:-1])
num_devices["county"] = num_devices["origin_census_block_group"].apply(lambda x: str(x).zfill(12)[:5])
num_devices["state"] = num_devices["origin_census_block_group"].apply(lambda x: str(x).zfill(12)[:2])
num_devices_ct = num_devices.groupby(["ct"]).sum().drop(["origin_census_block_group"], axis=1).reset_index()
num_devices_county = num_devices.groupby(["county"]).sum().drop(["origin_census_block_group"], axis=1).reset_index()
num_devices_state = num_devices.groupby(["state"]).sum().drop(["origin_census_block_group"], axis=1).reset_index()
# Pop device
pop_device_ct =
|
pd.merge(left=num_devices_ct, left_on="ct", right=pop_ct, right_on="ct")
|
pandas.merge
|
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(), aka panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with
|
tm.assertRaises(ValueError)
|
pandas.util.testing.assertRaises
|
"""
This script reads all the bootstrap performance result files, plots histograms, and calculates averages.
t-tests are performed to compute p-values, and confidence intervals are computed.
"""
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib
from scipy import stats
matplotlib.rcParams.update({'font.size': 8})
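# Illustrative sketch (not part of the original script): one way the t-test and
# confidence-interval step mentioned in the module docstring could be written, assuming
# two lists of bootstrap metrics to compare (e.g. RNN vs. LSTM RMSE). The helper name,
# the two-sample t-test, and the percentile CI are hypothetical choices.
def bootstrap_summary(sample_a, sample_b, alpha=0.05):
    """Return the two-sample t-test p-value and a percentile CI for each metric sample."""
    _, p_value = stats.ttest_ind(sample_a, sample_b)
    ci_a = np.percentile(sample_a, [100 * alpha / 2, 100 * (1 - alpha / 2)])
    ci_b = np.percentile(sample_b, [100 * alpha / 2, 100 * (1 - alpha / 2)])
    return p_value, ci_a, ci_b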
# well_list = ["043", "125", "129", "153", "155", "170", "175"]
well_list = ["125"]
for well in well_list: # loop through all wells
# specify folder locations
out_folder = "C:/Users/<NAME>/PycharmProjects/Tensorflow/rnn_lstm_comparison_results/mmps" + well
rnn_full_results_folder = "C:/Users/<NAME>/PycharmProjects/Tensorflow/Rivanna_results/mmps" + well +\
"_results_full_bootstrap_rnn/"
lstm_full_results_folder = "C:/Users/<NAME>/PycharmProjects/Tensorflow/Rivanna_results/mmps" + well +\
"_results_full_bootstrap_lstm/"
rnn_storms_results_folder = "C:/Users/<NAME>/PycharmProjects/Tensorflow/Rivanna_results/mmps" + well +\
"_results_storm_bootstrap_rnn/"
lstm_storms_results_folder = "C:/Users/<NAME>/PycharmProjects/Tensorflow/Rivanna_results/mmps" + well +\
"_results_storm_bootstrap_lstm/"
folder_list = [rnn_full_results_folder, lstm_full_results_folder, rnn_storms_results_folder,
lstm_storms_results_folder]
rmse_df_list = []
nse_df_list = []
mae_df_list = []
rmse_storms_df_list = []
nse_storms_df_list = []
mae_storms_df_list = []
for folder in folder_list:
folder_name1 = folder.split("/")[6].split("_")[2]
folder_name2 = folder.split("/")[6].split("_")[4]
folder_name = folder_name1 + "_" + folder_name2
print(folder_name)
rmse_t1_list, rmse_t9_list, rmse_t18_list = [], [], []
nse_t1_list, nse_t9_list, nse_t18_list = [], [], []
mae_t1_list, mae_t9_list, mae_t18_list = [], [], []
rmse_storms_t1_list, rmse_storms_t9_list, rmse_storms_t18_list = [], [], []
nse_storms_t1_list, nse_storms_t9_list, nse_storms_t18_list = [], [], []
mae_storms_t1_list, mae_storms_t9_list, mae_storms_t18_list = [], [], []
count = 0
for file in os.listdir(folder): # extract forecast data
if count % 100 == 0:
print(folder, "count is", count)
data = folder + file
if file.endswith("_RMSE.csv"):
# print(file)
rmse_df = pd.read_csv(data)
rmse_t1, rmse_t9, rmse_t18 = rmse_df[["0"]].iloc[0], rmse_df[["0"]].iloc[8], rmse_df[["0"]].iloc[17]
rmse_t1_list.append(rmse_t1[0])
rmse_t9_list.append(rmse_t9[0])
rmse_t18_list.append(rmse_t18[0])
if file.endswith("_NSE.csv"):
nse_df = pd.read_csv(data)
nse_t1, nse_t9, nse_t18 = nse_df[["0"]].iloc[0], nse_df[["0"]].iloc[8], nse_df[["0"]].iloc[17]
nse_t1_list.append(nse_t1[0])
nse_t9_list.append(nse_t9[0])
nse_t18_list.append(nse_t18[0])
if file.endswith("_MAE.csv"):
mae_df = pd.read_csv(data)
mae_t1, mae_t9, mae_t18 = mae_df[["0"]].iloc[0], mae_df[["0"]].iloc[8], mae_df[["0"]].iloc[17]
mae_t1_list.append(mae_t1[0])
mae_t9_list.append(mae_t9[0])
mae_t18_list.append(mae_t18[0])
if file.endswith("_RMSE_storms.csv"):
# print(file)
rmse_df = pd.read_csv(data)
rmse_storms_t1, rmse_storms_t9, rmse_storms_t18 = rmse_df[["0"]].iloc[0], rmse_df[["0"]].iloc[1],\
rmse_df[["0"]].iloc[2]
rmse_storms_t1_list.append(rmse_storms_t1[0])
rmse_storms_t9_list.append(rmse_storms_t9[0])
rmse_storms_t18_list.append(rmse_storms_t18[0])
if file.endswith("_NSE_storms.csv"):
nse_df = pd.read_csv(data)
nse_storms_t1, nse_storms_t9, nse_storms_t18 = nse_df[["0"]].iloc[0], nse_df[["0"]].iloc[1],\
nse_df[["0"]].iloc[2]
nse_storms_t1_list.append(nse_storms_t1[0])
nse_storms_t9_list.append(nse_storms_t9[0])
nse_storms_t18_list.append(nse_storms_t18[0])
if file.endswith("_MAE_storms.csv"):
mae_df = pd.read_csv(data)
mae_storms_t1, mae_storms_t9, mae_storms_t18 = mae_df[["0"]].iloc[0], mae_df[["0"]].iloc[1],\
mae_df[["0"]].iloc[2]
mae_storms_t1_list.append(mae_storms_t1[0])
mae_storms_t9_list.append(mae_storms_t9[0])
mae_storms_t18_list.append(mae_storms_t18[0])
count += 1
# write extracted data to data frames
folder_RMSE_df = pd.DataFrame([rmse_t1_list, rmse_t9_list, rmse_t18_list]).transpose()
folder_RMSE_df.columns = [(folder_name + "_t+1"), (folder_name + "_t+9"), (folder_name + "_t+18")]
# print("folder rmse df", folder_RMSE_df.head())
folder_NSE_df = pd.DataFrame([nse_t1_list, nse_t9_list, nse_t18_list]).transpose()
folder_NSE_df.columns = [(folder_name + "_t+1"), (folder_name + "_t+9"), (folder_name + "_t+18")]
folder_MAE_df = pd.DataFrame([mae_t1_list, mae_t9_list, mae_t18_list]).transpose()
folder_MAE_df.columns = [(folder_name + "_t+1"), (folder_name + "_t+9"), (folder_name + "_t+18")]
if folder_name1 == "full":
folder_storms_RMSE_df = pd.DataFrame([rmse_storms_t1_list, rmse_storms_t9_list, rmse_storms_t18_list])\
.transpose()
folder_storms_RMSE_df.columns = [(folder_name + "storms_t+1"), (folder_name + "storms_t+9"),
(folder_name + "storms_t+18")]
# print("folder rmse df", folder_RMSE_df.head())
folder_storms_NSE_df = pd.DataFrame([nse_storms_t1_list, nse_storms_t9_list, nse_storms_t18_list])\
.transpose()
folder_storms_NSE_df.columns = [(folder_name + "storms_t+1"), (folder_name + "storms_t+9"),
(folder_name + "storms_t+18")]
folder_storms_MAE_df = pd.DataFrame([mae_storms_t1_list, mae_storms_t9_list, mae_storms_t18_list])\
.transpose()
folder_storms_MAE_df.columns = [(folder_name + "storms_t+1"), (folder_name + "storms_t+9"),
(folder_name + "storms_t+18")]
# append folder dataframes to lists
rmse_df_list.append(folder_RMSE_df)
nse_df_list.append(folder_NSE_df)
mae_df_list.append(folder_MAE_df)
if folder_name1 == "full":
rmse_df_list.append(folder_storms_RMSE_df)
nse_df_list.append(folder_storms_NSE_df)
mae_df_list.append(folder_storms_MAE_df)
# concat data to well dfs
rmse_df = pd.concat(rmse_df_list, axis=1)
rmse_df = rmse_df[:948]
nse_df = pd.concat(nse_df_list, axis=1)
nse_df = nse_df[:1000]
mae_df =
|
pd.concat(mae_df_list, axis=1)
|
pandas.concat
|
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas import Index, Period, PeriodIndex, Series, date_range, offsets, period_range
import pandas.core.indexes.period as period
import pandas.util.testing as tm
class TestPeriodIndex:
def setup_method(self, method):
pass
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="D")]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(
pd.Index(np.array(arr)), pd.Index(np.array(arr), dtype=object)
)
def test_constructor_use_start_freq(self):
# GH #1118
p = Period("4/2/2012", freq="B")
with tm.assert_produces_warning(FutureWarning):
index = PeriodIndex(start=p, periods=10)
expected = period_range(start="4/2/2012", periods=10, freq="B")
tm.assert_index_equal(index, expected)
index = period_range(start=p, periods=10)
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq="Q-DEC")
expected = period_range("1990Q3", "2009Q2", freq="Q-DEC")
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq="2Q-DEC")
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
msg = "Mismatched Period array lengths"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="M")
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="2M")
msg = "Can either instantiate from fields or endpoints, but not both"
with pytest.raises(ValueError, match=msg):
PeriodIndex(
year=years, month=months, freq="M", start=Period("2007-01", freq="M")
)
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq="M")
exp = period_range("2007-01", periods=3, freq="M")
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
# U was used as undefined period
with pytest.raises(ValueError, match="Invalid frequency: X"):
period_range("2007-1-1", periods=500, freq="X")
def test_constructor_nano(self):
idx = period_range(
start=Period(ordinal=1, freq="N"), end=Period(ordinal=4, freq="N"), freq="N"
)
exp = PeriodIndex(
[
Period(ordinal=1, freq="N"),
Period(ordinal=2, freq="N"),
Period(ordinal=3, freq="N"),
Period(ordinal=4, freq="N"),
],
freq="N",
)
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(pindex.year, pd.Index(years))
tm.assert_index_equal(pindex.quarter, pd.Index(quarters))
def test_constructor_invalid_quarters(self):
msg = "Quarter must be 1 <= q <= 4"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=range(2000, 2004), quarter=list(range(4)), freq="Q-DEC")
def test_constructor_corner(self):
msg = "Not enough parameters to construct Period range"
with pytest.raises(ValueError, match=msg):
PeriodIndex(periods=10, freq="A")
start = Period("2007", freq="A-JUN")
end = Period("2010", freq="A-DEC")
msg = "start and end must have same freq"
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start, end=end)
msg = (
"Of the three parameters: start, end, and periods, exactly two"
" must be specified"
)
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start)
with pytest.raises(ValueError, match=msg):
PeriodIndex(end=end)
result = period_range("2007-01", periods=10.5, freq="M")
exp =
|
period_range("2007-01", periods=10, freq="M")
|
pandas.period_range
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import cpi
import csv
import pandas as pd
import unittest
import warnings
from click.testing import CliRunner
from datetime import date, datetime
from cpi import cli
from cpi.errors import CPIDoesNotExist
class CliTest(unittest.TestCase):
def invoke(self, *args):
runner = CliRunner()
result = runner.invoke(cli.inflate, args)
self.assertEqual(result.exit_code, 0)
string_value = result.output.replace("\n", "")
# Do some rounding to ensure the same results for Python 2 and 3
return str(round(float(string_value), 7))
def test_inflate_years(self):
self.assertEqual(self.invoke("100", "1950"), '1017.0954357')
self.assertEqual(self.invoke("100", "1950", "--to", "1960"), "122.8215768")
self.assertEqual(self.invoke("100", "1950", "--to", "1950"), "100.0")
def test_inflate_months(self):
self.assertEqual(self.invoke("100", "1950-01-01"), '1070.587234')
self.assertEqual(self.invoke("100", "1950-01-11"), "1070.587234")
self.assertEqual(
self.invoke("100", "1950-01-11", "--to", "1960-01-01"),
"124.6808511"
)
self.assertEqual(self.invoke("100", "1950-01-01 00:00:00", "--to", "1950-01-01"), "100.0")
self.assertEqual(self.invoke("100", "1950-01-01", "--to", "2018-01-01"), '1054.7531915')
self.assertEqual(self.invoke("100", "1950-01-01", "--to", "1960-01-01"), '124.6808511')
class CPITest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.TEST_YEAR_EARLIER = 1950
cls.TEST_YEAR_MIDDLE = 1960
cls.TEST_YEAR_LATER = 2000
cls.END_YEAR = 2018
cls.EARLIEST_YEAR = 1913
cls.LATEST_YEAR = 2017
cls.DOLLARS = 100
cls.DATA_FILE = 'cpi/data.csv'
def test_get(self):
self.assertEqual(cpi.get(CPITest.TEST_YEAR_EARLIER), 24.1)
self.assertEqual(cpi.get(CPITest.TEST_YEAR_LATER), 172.2)
with self.assertRaises(CPIDoesNotExist):
cpi.get(1900)
with self.assertRaises(CPIDoesNotExist):
cpi.get(date(1900, 1, 1))
with self.assertRaises(CPIDoesNotExist):
cpi.get(1950, series="FOOBAR")
def test_get_value_error(self):
with self.assertRaises(ValueError):
cpi.get(1900.1)
cpi.get(datetime.now())
cpi.get(3000)
def test_inflate_years(self):
self.assertEqual(
cpi.inflate(
CPITest.DOLLARS, CPITest.TEST_YEAR_EARLIER),
1017.0954356846472)
self.assertEqual(
cpi.inflate(
CPITest.DOLLARS, CPITest.TEST_YEAR_EARLIER,
to=CPITest.TEST_YEAR_LATER),
1017.0954356846472)
self.assertEqual(
cpi.inflate(
CPITest.DOLLARS, CPITest.TEST_YEAR_EARLIER,
to=CPITest.TEST_YEAR_MIDDLE),
122.82157676348547)
self.assertEqual(
cpi.inflate(
CPITest.DOLLARS, CPITest.TEST_YEAR_EARLIER,
to=CPITest.TEST_YEAR_EARLIER),
CPITest.DOLLARS)
def test_inflate_months(self):
self.assertEqual(
cpi.inflate(
CPITest.DOLLARS, date(CPITest.TEST_YEAR_EARLIER, 1, 1)),
1070.587234042553)
self.assertEqual(
cpi.inflate(
CPITest.DOLLARS, date(CPITest.TEST_YEAR_EARLIER, 1, 11)),
1070.587234042553)
self.assertEqual(
cpi.inflate(
CPITest.DOLLARS,
datetime(CPITest.TEST_YEAR_EARLIER, 1, 1)),
1070.587234042553)
self.assertEqual(
cpi.inflate(
CPITest.DOLLARS, date(CPITest.TEST_YEAR_EARLIER, 1, 1),
to=date(CPITest.END_YEAR, 1, 1)), 1054.7531914893618)
self.assertEqual(
cpi.inflate(
CPITest.DOLLARS, date(CPITest.TEST_YEAR_EARLIER, 1, 1),
to=date(CPITest.TEST_YEAR_MIDDLE, 1, 1)),
124.68085106382979)
def test_inflate_months_total(self):
def predicate(x, target_year):
return x == target_year
# skip header row
with open(CPITest.DATA_FILE) as f:
d = list(csv.reader(f))[1:]
# get first month for target year
val = next(x for x in d if predicate(int(x[3]), CPITest.END_YEAR))
END_INDEX = float(val[-1])
def calculate_inflation(start_year):
val = next(x for x in d if predicate(int(x[3]), start_year))
start_index = float(val[-1])
return (CPITest.DOLLARS / start_index) * END_INDEX
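# For intuition, with the annual indices asserted in test_get above: adjusting $100
# from 1950 (index 24.1) to 2000 (index 172.2) gives 100 / 24.1 * 172.2 ≈ 714.5.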
for year in range(CPITest.TEST_YEAR_EARLIER, CPITest.END_YEAR):
self.assertTrue(
abs(
cpi.inflate(
CPITest.DOLLARS,
date(year, 1, 1),
to=date(CPITest.END_YEAR, 1, 1)) -
calculate_inflation(year)) < 0.001)
def test_deflate(self):
self.assertEqual(
cpi.inflate(
1017.0954356846472, 2017, to=CPITest.TEST_YEAR_EARLIER),
CPITest.DOLLARS)
self.assertEqual(
cpi.inflate(
122.82157676348547,
CPITest.TEST_YEAR_MIDDLE,
to=CPITest.TEST_YEAR_EARLIER),
CPITest.DOLLARS)
def test_numpy_dtypes(self):
self.assertEqual(
cpi.get(pd.np.int64(1950)),
cpi.get(1950)
)
self.assertEqual(
cpi.inflate(100, pd.np.int32(1950)),
cpi.inflate(100, 1950),
)
self.assertEqual(
cpi.inflate(100, pd.np.int64(1950), to=pd.np.int64(1960)),
cpi.inflate(100, 1950, to=1960),
)
self.assertEqual(
cpi.inflate(100, pd.np.int64(1950), to=pd.np.int32(1960)),
cpi.inflate(100, 1950, to=1960),
)
self.assertEqual(
cpi.inflate(100, pd.np.int64(1950), to=1960),
cpi.inflate(100, 1950, to=1960),
)
self.assertEqual(
cpi.inflate(100, pd.to_datetime("1950-07-01"), to=
|
pd.to_datetime("1960-07-01")
|
pandas.to_datetime
|
import turtle
import pandas
import os
LOCATION = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
FILENAME_STATES = "50_states.csv"
FILENAME_BACKGROUND = "blank_states_img.gif"
FILENAME_STATES_TO_LEARN = "states_to_learn.csv"
FULL_PATH_STATES = os.path.join(LOCATION, FILENAME_STATES)
FULL_PATH_BACKGROUND = os.path.join(LOCATION, FILENAME_BACKGROUND)
FULL_PATH_STATES_TO_LEARN = os.path.join(LOCATION, FILENAME_STATES_TO_LEARN)
screen = turtle.Screen()
screen.title("U.S. States Game")
screen.addshape(FULL_PATH_BACKGROUND)
turtle.shape(FULL_PATH_BACKGROUND)
states_data = pandas.read_csv(FULL_PATH_STATES)
us_states = states_data.state.to_list()
guessed_states = []
while len(guessed_states) < 50:
answer_state = screen.textinput(title=f"{len(guessed_states)}/50 States Correct", prompt="What's another state's name?")
answer_state = answer_state.title()
if answer_state == "Exit":
missing_states = []
for state in us_states:
if state not in guessed_states:
missing_states.append(state)
new_data =
|
pandas.DataFrame(missing_states)
|
pandas.DataFrame
|
#!/usr/bin/env python3
import git
import pandas as pd
from hourly import get_work_commits, is_clocked_in, is_clocked_out, update_log, commit_log, get_labor
from hourly import get_hours_worked, get_earnings, get_labor_range
from hourly import plot_labor, get_current_user, get_clocks
from hourly import invoice
from hourly import get_local_timezone
import plotly.graph_objs as go
import plotly.offline as po
from omegaconf import OmegaConf, DictConfig, ListConfig
import hydra
from os import path
import os
import sys
import logging
import copy
import numpy as np
import datetime
def handle_errors(cfg, error_msg = None):
if error_msg is not None:
print(error_msg)
if cfg.handle_errors == 'exit':
sys.exit()
else:
raise
def commit_(repo, commit_message, logfile = None):
if logfile is not None:
repo.index.add([logfile])
commit = repo.index.commit(commit_message)
return commit
def identify_user(user, cfg):
user_id = []
for id_type in cfg.commit.identity:
if id_type in ['name', 'email']:
user_id.append(getattr(user, id_type))
if len(user_id) > 1:
return tuple(user_id)
else:
return user_id[0]
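# e.g. with cfg.commit.identity = ["name", "email"] this returns (user.name, user.email);
# with a single identity field it returns just that one value.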
def process_commit(cfg, work, repo):
"""commits clock-in/out message
If only a message is supplied, commits without clocking in/out
"""
header_depth = '#'*cfg.work_log.header_depth
commit_message = cfg.commit.message or ''
log_message = ''
if len(commit_message) > 0:
log_message = '{} {}\n'.format(cfg.work_log.bullet, commit_message)
if 'clock' in cfg.commit:
if cfg.commit.clock is not None:
tminus = cfg.commit.tminus or ''
if len(tminus) != 0:
commit_message = "T-{} {}".format(tminus.strip('T-'), commit_message)
if cfg.commit.clock.lower() == 'in':
last_in = is_clocked_in(work)
if last_in is not None:
time_since_in = pd.datetime.now(last_in.tzinfo) - last_in
raise IOError(
"You are still clocked in!\n" + \
"\tlast clock in: {} ({:.2f} hours ago)".format(
last_in,
time_since_in.total_seconds()/3600.))
else:
if len(commit_message) == 0:
commit_message = "clock-in"
else:
commit_message = "clock-in: {}".format(commit_message)
log_message = "\n{} {}: {}\n\n".format(
header_depth,
pd.datetime.now(),
commit_message)
print("clocking in with message: {} ".format(commit_message))
elif cfg.commit.clock.lower() == 'out': # prevent clock in and out at the same time
last_out = is_clocked_out(work)
if last_out is not None:
time_since_out = pd.datetime.now(last_out.tzinfo) - last_out
raise IOError(
"You already clocked out!\n" + \
"\tlast clock out: {} ({:.2f} hours ago)".format(
last_out,
time_since_out.total_seconds()/3600.))
else:
if len(commit_message) == 0:
commit_message = "clock-out"
else:
commit_message = "clock-out: {}".format(commit_message)
log_message = "{} {}: {}\n\n".format(
header_depth,
pd.datetime.now(),
commit_message)
print("clocking out with message: {} ".format(commit_message))
else:
raise IOError("unrecocgnized clock value: {}".format(cfg.commit.clock))
# logfile = hydra.utils.to_absolute_path(cfg.work_log.filename)
logfile = os.path.abspath(cfg.work_log.filename)
if len(log_message) > 0:
update_log(logfile, log_message)
return commit_(repo, commit_message, logfile)
def flatten_dict(d, sep = '.'):
'''flattens a nested dictionary and returns a single record dict of {k.sub_key: v} pairs
courtesy of MYGz https://stackoverflow.com/a/41801708
returns {k.sub_key: v, ...}
'''
return pd.io.json.json_normalize(d, sep=sep).to_dict(orient='records')[0]
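# e.g. flatten_dict({"a": {"b": 1}, "c": 2}) -> {"a.b": 1, "c": 2}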
def config_override(cfg):
"""Overrides with user-supplied configuration
hourly will override its configuration using
hourly.yaml if it is in the base git directory
or users can set an override config:
config_override=path/to/myconfig.yaml
"""
# change to the git directory of the original working dir
original_path = hydra.utils.get_original_cwd()
change_git_dir(original_path, verbosity = cfg.verbosity)
# get the full path of the override file if available
override_path = os.path.abspath(cfg.config_override)
if path.exists(override_path):
if cfg.verbosity > 0:
print("overriding config with {}".format(override_path))
override_conf = OmegaConf.load(override_path)
# merge overrides first input with second
cfg = OmegaConf.merge(cfg, override_conf)
else:
if cfg.verbosity > 0:
print("override path does not exist: {}".format(override_path))
# merge in command line arguments
cli_conf = OmegaConf.from_cli()
cfg = OmegaConf.merge(cfg, cli_conf)
return cfg
def get_user_work(work, current_user, identifier):
for user_id, user_work in work.groupby(identifier):
if user_id == current_user:
return user_work
def resolve(cfg):
"""Expands a configuration, interpolating variables"""
cfg_dict = cfg.to_container(resolve = True)
return OmegaConf.create(cfg_dict)
def localize(t):
if t is None:
return t
if t.tzinfo is None:
LOCAL_TIMEZONE = get_local_timezone()
return t.tz_localize(LOCAL_TIMEZONE)
else:
return t
def get_avg_time(cfg, labor, total_hours):
tdelta = labor.set_index('TimeIn').TimeDelta.groupby(pd.Grouper(freq = cfg.vis.frequency)).sum()
tmin = tdelta.index.min()
tmax = tdelta.index.max()
time_range_sec = (tmax - tmin).total_seconds()
if cfg.verbosity:
print('freq: {}'.format(cfg.vis.frequency))
print('time range [sec]: {}'.format(time_range_sec))
bin_size_val, bin_size_unit = cfg.vis.frequency.split(' ')
bin_size = pd.Timedelta(float(bin_size_val), unit = bin_size_unit)
bin_size_sec = bin_size.total_seconds()
if cfg.verbosity:
print('bin size : {} [sec]: {}'.format(bin_size, bin_size_sec))
time_bins = (time_range_sec/bin_size_sec) + 1
avg_time = total_hours/time_bins
if cfg.verbosity:
print('hours: {} bins: {} average time: {}'.format(total_hours, time_bins, avg_time))
return avg_time, tmin, tmax
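# Worked example (hypothetical numbers): cfg.vis.frequency = "7 d" over a 28-day labor
# span gives 28/7 + 1 = 5 bins, so 40 total hours average out to 8 hours per bin.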
def divide_labor(cfg, labor):
"""divides labor among multiple repo names"""
rows = []
for _, row in labor.iterrows():
if isinstance(row.repo, tuple):
if cfg.verbosity > 1:
print("!!!!!!!Found multiple names {} !!!!!!".format(row.repo))
# divide evenly among the tags
tag_count = len(row.repo)
row_tag = row
row_tag.TimeDelta = row.TimeDelta/tag_count
row_tag.Hours = row.Hours/tag_count
for repo_name in row.repo:
row_tag.repo = repo_name
rows.append(pd.DataFrame(row_tag).T)
if cfg.verbosity > 1:
print(' appending {}'.format(repo_name))
else:
if cfg.verbosity > 1:
print('appending {}'.format(row.repo))
rows.append(pd.DataFrame(row).T)
return pd.concat(rows)
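# e.g. a 2-hour entry tagged with repo ("projA", "projB") (hypothetical names) is split
# into two rows of 1 hour each, one per repo, before being re-concatenated.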
def run_report(cfg):
if cfg.verbosity > 1:
print(cfg.pretty())
repos = resolve(cfg.report.repos)
if 'start_date' in cfg.repo:
start_date = pd.to_datetime(cfg.repo.start_date)
start_date = localize(start_date)
else:
start_date = None
if 'end_date' in cfg.repo:
end_date = pd.to_datetime(cfg.repo.end_date)
end_date = localize(end_date)
else:
end_date = None
if 'pandas' in cfg.report:
pd_opts = flatten_dict(
OmegaConf.to_container(cfg.report.pandas))
for k,v in pd_opts.items():
pd.set_option(k,v)
clocks = pd.DataFrame()
if type(cfg.commit.identity) is str:
identifier = [cfg.commit.identity]
else:
identifier = cfg.commit.identity.to_container()
labor =
|
pd.DataFrame()
|
pandas.DataFrame
|
from __future__ import unicode_literals
import copy
import io
import itertools
import json
import os
import shutil
import string
import sys
from collections import OrderedDict
from future.utils import iteritems
from unittest import TestCase
import pandas as pd
import pytest
from backports.tempfile import TemporaryDirectory
from tempfile import NamedTemporaryFile
from hypothesis import (
given,
HealthCheck,
reproduce_failure,
settings,
)
from hypothesis.strategies import (
dictionaries,
integers,
floats,
just,
lists,
text,
tuples,
)
from mock import patch, Mock
from oasislmf.model_preparation.manager import OasisManager as om
from oasislmf.model_preparation.pipeline import OasisFilesPipeline as ofp
from oasislmf.models.model import OasisModel
from oasislmf.utils.exceptions import OasisException
from oasislmf.utils.fm import (
unified_canonical_fm_profile_by_level_and_term_group,
)
from oasislmf.utils.metadata import (
OASIS_COVERAGE_TYPES,
OASIS_FM_LEVELS,
OASIS_KEYS_STATUS,
OASIS_PERILS,
OED_COVERAGE_TYPES,
OED_PERILS,
)
from ..models.fakes import fake_model
from ..data import (
canonical_accounts,
canonical_accounts_profile,
canonical_exposure,
canonical_exposure_profile,
canonical_oed_accounts,
canonical_oed_accounts_profile,
canonical_oed_exposure,
canonical_oed_exposure_profile,
fm_input_items,
gul_input_items,
keys,
oasis_fm_agg_profile,
oed_fm_agg_profile,
write_canonical_files,
write_canonical_oed_files,
write_keys_files,
)
class AddModel(TestCase):
def test_models_is_empty___model_is_added_to_model_dict(self):
model = fake_model('supplier', 'model', 'version')
manager = om()
manager.add_model(model)
self.assertEqual({model.key: model}, manager.models)
def test_manager_already_contains_a_model_with_the_given_key___model_is_replaced_in_models_dict(self):
first = fake_model('supplier', 'model', 'version')
second = fake_model('supplier', 'model', 'version')
manager = om(oasis_models=[first])
manager.add_model(second)
self.assertIs(second, manager.models[second.key])
def test_manager_already_contains_a_diferent_model___model_is_added_to_dict(self):
first = fake_model('first', 'model', 'version')
second = fake_model('second', 'model', 'version')
manager = om(oasis_models=[first])
manager.add_model(second)
self.assertEqual({
first.key: first,
second.key: second,
}, manager.models)
class DeleteModels(TestCase):
def test_models_is_not_in_manager___no_model_is_removed(self):
manager = om([
fake_model('supplier', 'model', 'version'),
fake_model('supplier2', 'model2', 'version2'),
])
expected = manager.models
manager.delete_models([fake_model('supplier3', 'model3', 'version3')])
self.assertEqual(expected, manager.models)
def test_models_exist_in_manager___models_are_removed(self):
models = [
fake_model('supplier', 'model', 'version'),
fake_model('supplier2', 'model2', 'version2'),
fake_model('supplier3', 'model3', 'version3'),
]
manager = om(models)
manager.delete_models(models[1:])
self.assertEqual({models[0].key: models[0]}, manager.models)
class GetCanonicalExposureProfile(TestCase):
def test_model_and_kwargs_are_not_set___result_is_null(self):
profile = om().get_canonical_exposure_profile()
self.assertEqual(None, profile)
@given(expected=dictionaries(text(), text()))
def test_model_is_set_with_profile_json___models_profile_is_set_to_expected_json(self, expected):
model = fake_model(resources={'canonical_exposure_profile_json': json.dumps(expected)})
profile = om().get_canonical_exposure_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['canonical_exposure_profile'])
@given(model_profile=dictionaries(text(), text()), kwargs_profile=dictionaries(text(), text()))
def test_model_is_set_with_profile_json_and_profile_json_is_passed_through_kwargs___kwargs_profile_is_used(
self,
model_profile,
kwargs_profile
):
model = fake_model(resources={'canonical_exposure_profile_json': json.dumps(model_profile)})
profile = om().get_canonical_exposure_profile(oasis_model=model, canonical_exposure_profile_json=json.dumps(kwargs_profile))
self.assertEqual(kwargs_profile, profile)
self.assertEqual(kwargs_profile, model.resources['canonical_exposure_profile'])
@given(expected=dictionaries(text(), text()))
def test_model_is_set_with_profile_json_path___models_profile_is_set_to_expected_json(self, expected):
with NamedTemporaryFile('w') as f:
json.dump(expected, f)
f.flush()
model = fake_model(resources={'canonical_exposure_profile_path': f.name})
profile = om().get_canonical_exposure_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['canonical_exposure_profile'])
@given(model_profile=dictionaries(text(), text()), kwargs_profile=dictionaries(text(), text()))
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_model_is_set_with_profile_json_path_and_profile_json_path_is_passed_through_kwargs___kwargs_profile_is_used(
self,
model_profile,
kwargs_profile
):
with NamedTemporaryFile('w') as model_file, NamedTemporaryFile('w') as kwargs_file:
json.dump(model_profile, model_file)
model_file.flush()
json.dump(kwargs_profile, kwargs_file)
kwargs_file.flush()
model = fake_model(resources={'canonical_exposure_profile_path': model_file.name})
profile = om().get_canonical_exposure_profile(oasis_model=model, canonical_exposure_profile_path=kwargs_file.name)
self.assertEqual(kwargs_profile, profile)
self.assertEqual(kwargs_profile, model.resources['canonical_exposure_profile'])
class CreateModel(TestCase):
def create_model(
self,
lookup='lookup',
keys_file_path='key_file_path',
keys_errors_file_path='keys_error_file_path',
model_exposure_file_path='model_exposure_file_path'
):
model = fake_model(resources={'lookup': lookup})
model.resources['oasis_files_pipeline'].keys_file_path = keys_file_path
model.resources['oasis_files_pipeline'].keys_errors_file_path = keys_errors_file_path
model.resources['oasis_files_pipeline'].model_exposure_file_path = model_exposure_file_path
return model
@given(
lookup=text(min_size=1, alphabet=string.ascii_letters),
keys_file_path=text(min_size=1, alphabet=string.ascii_letters),
keys_errors_file_path=text(min_size=1, alphabet=string.ascii_letters),
exposure_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_supplier_and_model_and_version_only_are_supplied___correct_model_is_returned(
self,
lookup,
keys_file_path,
keys_errors_file_path,
exposure_file_path
):
model = self.create_model(lookup=lookup, keys_file_path=keys_file_path, keys_errors_file_path=keys_errors_file_path, model_exposure_file_path=exposure_file_path)
with patch('oasislmf.model_preparation.lookup.OasisLookupFactory.save_results', Mock(return_value=(keys_file_path, 1, keys_errors_file_path, 1))) as oklf_mock:
res_keys_file_path, res_keys_errors_file_path = om().get_keys(oasis_model=model)
oklf_mock.assert_called_once_with(
lookup,
keys_file_path,
errors_fp=keys_errors_file_path,
model_exposure_fp=exposure_file_path
)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_file_path, keys_file_path)
self.assertEqual(res_keys_file_path, keys_file_path)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_errors_file_path, keys_errors_file_path)
self.assertEqual(res_keys_errors_file_path, keys_errors_file_path)
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version=text(alphabet=string.ascii_letters, min_size=1),
model_lookup=text(min_size=1, alphabet=string.ascii_letters),
model_keys_fp=text(alphabet=string.ascii_letters, min_size=1),
model_keys_errors_fp=text(alphabet=string.ascii_letters, min_size=1),
model_exposure_fp=text(alphabet=string.ascii_letters, min_size=1),
lookup=text(min_size=1, alphabet=string.ascii_letters),
keys_fp=text(alphabet=string.ascii_letters, min_size=1),
keys_errors_fp=text(alphabet=string.ascii_letters, min_size=1),
exposure_fp=text(alphabet=string.ascii_letters, min_size=1)
)
def test_supplier_and_model_and_version_and_absolute_oasis_files_path_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version,
model_lookup,
model_keys_fp,
model_keys_errors_fp,
model_exposure_fp,
lookup,
keys_fp,
keys_errors_fp,
exposure_fp
):
resources={
'lookup': model_lookup,
'keys_file_path': model_keys_fp,
'keys_errors_file_path': model_keys_errors_fp,
'model_exposure_file_path': model_exposure_fp
}
model = om().create_model(supplier_id, model_id, version, resources=resources)
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version)
with patch('oasislmf.model_preparation.lookup.OasisLookupFactory.save_results', Mock(return_value=(keys_fp, 1, keys_errors_fp, 1))) as oklf_mock:
res_keys_file_path, res_keys_errors_file_path = om().get_keys(
oasis_model=model,
lookup=lookup,
model_exposure_file_path=exposure_fp,
keys_file_path=keys_fp,
keys_errors_file_path=keys_errors_fp
)
oklf_mock.assert_called_once_with(
lookup,
keys_fp,
errors_fp=keys_errors_fp,
model_exposure_fp=exposure_fp
)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_file_path, keys_fp)
self.assertEqual(res_keys_file_path, keys_fp)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_errors_file_path, keys_errors_fp)
self.assertEqual(res_keys_errors_file_path, keys_errors_fp)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
self.assertEqual(resources, model.resources)
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
self.assertIsNone(model.resources.get('canonical_exposure_profile'))
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
oasis_files_path=text(alphabet=string.ascii_letters, min_size=1)
)
def test_supplier_and_model_and_version_and_relative_oasis_files_path_only_are_supplied___correct_model_is_returned_with_absolute_oasis_file_path(
self,
supplier_id,
model_id,
version_id,
oasis_files_path
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
oasis_files_path = oasis_files_path.lstrip(os.path.sep)
resources={'oasis_files_path': oasis_files_path}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
self.assertTrue(os.path.isabs(model.resources['oasis_files_path']))
self.assertEqual(os.path.abspath(resources['oasis_files_path']), model.resources['oasis_files_path'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
self.assertIsNone(model.resources.get('canonical_exposure_profile'))
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
canonical_exposure_profile=dictionaries(text(min_size=1), text(min_size=1))
)
def test_supplier_and_model_and_version_and_canonical_exposure_profile_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
canonical_exposure_profile
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={'canonical_exposure_profile': canonical_exposure_profile}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
self.assertEqual(resources['canonical_exposure_profile'], model.resources['canonical_exposure_profile'])
expected_oasis_files_path = os.path.abspath(os.path.join('Files', expected_key.replace('/', '-')))
self.assertEqual(expected_oasis_files_path, model.resources['oasis_files_path'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
oasis_files_path=text(alphabet=string.ascii_letters, min_size=1),
canonical_exposure_profile=dictionaries(text(min_size=1), text(min_size=1))
)
def test_supplier_and_model_and_version_and_relative_oasis_files_path_and_canonical_exposure_profile_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
oasis_files_path,
canonical_exposure_profile
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={'oasis_files_path': oasis_files_path, 'canonical_exposure_profile': canonical_exposure_profile}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
self.assertTrue(os.path.isabs(model.resources['oasis_files_path']))
self.assertEqual(os.path.abspath(resources['oasis_files_path']), model.resources['oasis_files_path'])
self.assertEqual(resources['canonical_exposure_profile'], model.resources['canonical_exposure_profile'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
oasis_files_path=text(alphabet=string.ascii_letters, min_size=1),
canonical_exposure_profile=dictionaries(text(min_size=1), text(min_size=1))
)
def test_supplier_and_model_and_version_and_absolute_oasis_files_path_and_canonical_exposure_profile_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
oasis_files_path,
canonical_exposure_profile
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={'oasis_files_path': os.path.abspath(oasis_files_path), 'canonical_exposure_profile': canonical_exposure_profile}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
self.assertEqual(resources['oasis_files_path'], model.resources['oasis_files_path'])
self.assertEqual(resources['canonical_exposure_profile'], model.resources['canonical_exposure_profile'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
source_accounts_file_path=text(alphabet=string.ascii_letters, min_size=1)
)
def test_supplier_and_model_and_version_and_source_accounts_file_path_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
source_accounts_file_path
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={'source_accounts_file_path': source_accounts_file_path}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
expected_oasis_files_path = os.path.abspath(os.path.join('Files', expected_key.replace('/', '-')))
self.assertEqual(expected_oasis_files_path, model.resources['oasis_files_path'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
self.assertEqual(resources['source_accounts_file_path'], model.resources['source_accounts_file_path'])
self.assertIsNone(model.resources.get('canonical_exposure_profile'))
self.assertIsNone(model.resources.get('canonical_accounts_profile'))
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
source_accounts_file_path=text(alphabet=string.ascii_letters, min_size=1),
canonical_accounts_profile=dictionaries(text(min_size=1), text(min_size=1))
)
def test_supplier_and_model_and_version_and_source_accounts_file_path_and_canonical_accounts_profile_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
source_accounts_file_path,
canonical_accounts_profile
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={'source_accounts_file_path': source_accounts_file_path, 'canonical_accounts_profile': canonical_accounts_profile}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
expected_oasis_files_path = os.path.abspath(os.path.join('Files', expected_key.replace('/', '-')))
self.assertEqual(expected_oasis_files_path, model.resources['oasis_files_path'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
self.assertIsNone(model.resources.get('canonical_exposure_profile'))
self.assertEqual(resources['source_accounts_file_path'], model.resources['source_accounts_file_path'])
self.assertEqual(resources['canonical_accounts_profile'], model.resources['canonical_accounts_profile'])
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
canonical_exposure_profile=dictionaries(text(min_size=1), text(min_size=1)),
source_accounts_file_path=text(alphabet=string.ascii_letters, min_size=1),
canonical_accounts_profile=dictionaries(text(min_size=1), text(min_size=1))
)
def test_supplier_and_model_and_version_and_canonical_exposure_profile_and_source_accounts_file_path_and_canonical_accounts_profile_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
canonical_exposure_profile,
source_accounts_file_path,
canonical_accounts_profile
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={
'canonical_exposure_profile': canonical_exposure_profile,
'source_accounts_file_path': source_accounts_file_path,
'canonical_accounts_profile': canonical_accounts_profile
}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
expected_oasis_files_path = os.path.abspath(os.path.join('Files', expected_key.replace('/', '-')))
self.assertEqual(expected_oasis_files_path, model.resources['oasis_files_path'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
self.assertEqual(resources['canonical_exposure_profile'], model.resources.get('canonical_exposure_profile'))
self.assertEqual(resources['source_accounts_file_path'], model.resources['source_accounts_file_path'])
self.assertEqual(resources['canonical_accounts_profile'], model.resources['canonical_accounts_profile'])
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
oasis_files_path=text(alphabet=string.ascii_letters, min_size=1),
canonical_exposure_profile=dictionaries(text(min_size=1), text(min_size=1)),
source_accounts_file_path=text(alphabet=string.ascii_letters, min_size=1),
canonical_accounts_profile=dictionaries(text(min_size=1), text(min_size=1))
)
def test_supplier_and_model_and_version_and_absolute_oasis_files_path_and_canonical_exposure_profile_and_source_accounts_file_path_and_canonical_accounts_profile_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
oasis_files_path,
canonical_exposure_profile,
source_accounts_file_path,
canonical_accounts_profile
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={
'oasis_files_path': os.path.abspath(oasis_files_path),
'canonical_exposure_profile': canonical_exposure_profile,
'source_accounts_file_path': source_accounts_file_path,
'canonical_accounts_profile': canonical_accounts_profile
}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
self.assertEqual(resources['oasis_files_path'], model.resources['oasis_files_path'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
self.assertEqual(resources['canonical_exposure_profile'], model.resources.get('canonical_exposure_profile'))
self.assertEqual(resources['source_accounts_file_path'], model.resources['source_accounts_file_path'])
self.assertEqual(resources['canonical_accounts_profile'], model.resources['canonical_accounts_profile'])
class LoadCanonicalAccountsProfile(TestCase):
def test_model_and_kwargs_are_not_set___result_is_null(self):
profile = om().get_canonical_accounts_profile()
self.assertEqual(None, profile)
@given(expected=dictionaries(text(), text()))
def test_model_is_set_with_profile_json___models_profile_is_set_to_expected_json(self, expected):
model = fake_model(resources={'canonical_accounts_profile_json': json.dumps(expected)})
profile = om().get_canonical_accounts_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['canonical_accounts_profile'])
@given(model_profile=dictionaries(text(), text()), kwargs_profile=dictionaries(text(), text()))
def test_model_is_set_with_profile_json_and_profile_json_is_passed_through_kwargs___kwargs_profile_is_used(
self,
model_profile,
kwargs_profile
):
model = fake_model(resources={'canonical_accounts_profile_json': json.dumps(model_profile)})
profile = om().get_canonical_accounts_profile(oasis_model=model, canonical_accounts_profile_json=json.dumps(kwargs_profile))
self.assertEqual(kwargs_profile, profile)
self.assertEqual(kwargs_profile, model.resources['canonical_accounts_profile'])
@given(expected=dictionaries(text(), text()))
def test_model_is_set_with_profile_json_path___models_profile_is_set_to_expected_json(self, expected):
with NamedTemporaryFile('w') as f:
json.dump(expected, f)
f.flush()
model = fake_model(resources={'canonical_accounts_profile_path': f.name})
profile = om().get_canonical_accounts_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['canonical_accounts_profile'])
@given(model_profile=dictionaries(text(), text()), kwargs_profile=dictionaries(text(), text()))
def test_model_is_set_with_profile_json_path_and_profile_json_path_is_passed_through_kwargs___kwargs_profile_is_used(
self,
model_profile,
kwargs_profile
):
with NamedTemporaryFile('w') as model_file, NamedTemporaryFile('w') as kwargs_file:
json.dump(model_profile, model_file)
model_file.flush()
json.dump(kwargs_profile, kwargs_file)
kwargs_file.flush()
model = fake_model(resources={'canonical_accounts_profile_path': model_file.name})
profile = om().get_canonical_accounts_profile(oasis_model=model, canonical_accounts_profile_path=kwargs_file.name)
self.assertEqual(kwargs_profile, profile)
self.assertEqual(kwargs_profile, model.resources['canonical_accounts_profile'])
class GetFmAggregationProfile(TestCase):
def setUp(self):
self.profile = oasis_fm_agg_profile
def test_model_and_kwargs_are_not_set___result_is_null(self):
profile = om().get_fm_aggregation_profile()
self.assertEqual(None, profile)
def test_model_is_set_with_profile_json___models_profile_is_set_to_expected_json(self):
expected = self.profile
profile_json = json.dumps(self.profile)
model = fake_model(resources={'fm_agg_profile_json': profile_json})
profile = om().get_fm_aggregation_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['fm_agg_profile'])
def test_model_is_set_with_profile_json_and_profile_json_is_passed_through_kwargs___kwargs_profile_is_used(self):
model = fake_model(resources={'fm_agg_profile_json': json.dumps(self.profile)})
profile = om().get_fm_aggregation_profile(oasis_model=model, fm_agg_profile_json=json.dumps(self.profile))
self.assertEqual(self.profile, profile)
self.assertEqual(self.profile, model.resources['fm_agg_profile'])
def test_model_is_set_with_profile_path___models_profile_is_set_to_expected_json(self):
expected = self.profile
with NamedTemporaryFile('w') as f:
json.dump(expected, f)
f.flush()
model = fake_model(resources={'fm_agg_profile_path': f.name})
profile = om().get_fm_aggregation_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['fm_agg_profile'])
def test_model_is_set_with_profile_path_and_profile_path_is_passed_through_kwargs___kwargs_profile_is_used(
self
):
with NamedTemporaryFile('w') as model_file, NamedTemporaryFile('w') as kwargs_file:
json.dump(self.profile, model_file)
model_file.flush()
json.dump(self.profile, kwargs_file)
kwargs_file.flush()
model = fake_model(resources={'fm_agg_profile_path': model_file.name})
profile = om().get_fm_aggregation_profile(oasis_model=model, fm_agg_profile_path=kwargs_file.name)
self.assertEqual(self.profile, profile)
self.assertEqual(self.profile, model.resources['fm_agg_profile'])
@pytest.mark.skipif(True, reason="CSV file transformations to be removed")
class TransformSourceToCanonical(TestCase):
@given(
source_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters),
source_to_canonical_exposure_transformation_file_path=text(min_size=1, alphabet=string.ascii_letters),
source_exposure_validation_file_path=text(alphabet=string.ascii_letters),
canonical_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_is_not_set___parameters_are_taken_from_kwargs(
self,
source_exposure_file_path,
source_to_canonical_exposure_transformation_file_path,
source_exposure_validation_file_path,
canonical_exposure_file_path
):
trans_call_mock = Mock()
with patch('oasislmf.model_preparation.csv_trans.Translator', Mock(return_value=trans_call_mock)) as trans_mock:
om().transform_source_to_canonical(
source_exposure_file_path=source_exposure_file_path,
source_to_canonical_exposure_transformation_file_path=source_to_canonical_exposure_transformation_file_path,
canonical_exposure_file_path=canonical_exposure_file_path
)
trans_mock.assert_called_once_with(
os.path.abspath(source_exposure_file_path),
os.path.abspath(canonical_exposure_file_path),
os.path.abspath(source_to_canonical_exposure_transformation_file_path),
xsd_path=None,
append_row_nums=True,
)
trans_call_mock.assert_called_once_with()
@given(
source_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters),
source_to_canonical_exposure_transformation_file_path=text(min_size=1, alphabet=string.ascii_letters),
source_exposure_validation_file_path=text(alphabet=string.ascii_letters),
canonical_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_is_set___parameters_are_taken_from_model(
self,
source_exposure_file_path,
source_to_canonical_exposure_transformation_file_path,
source_exposure_validation_file_path,
canonical_exposure_file_path):
model = fake_model(resources={
'source_exposure_file_path': source_exposure_file_path,
'source_exposure_validation_file_path': source_exposure_validation_file_path,
'source_to_canonical_exposure_transformation_file_path': source_to_canonical_exposure_transformation_file_path,
})
model.resources['oasis_files_pipeline'].canonical_exposure_path = canonical_exposure_file_path
trans_call_mock = Mock()
with patch('oasislmf.model_preparation.csv_trans.Translator', Mock(return_value=trans_call_mock)) as trans_mock:
om().transform_source_to_canonical(
source_exposure_file_path=source_exposure_file_path,
source_to_canonical_exposure_transformation_file_path=source_to_canonical_exposure_transformation_file_path,
canonical_exposure_file_path=canonical_exposure_file_path
)
trans_mock.assert_called_once_with(
os.path.abspath(source_exposure_file_path),
os.path.abspath(canonical_exposure_file_path),
os.path.abspath(source_to_canonical_exposure_transformation_file_path),
xsd_path=None,
append_row_nums=True
)
trans_call_mock.assert_called_once_with()
@pytest.mark.skipif(True, reason="CSV file transformations to be removed")
class TransformCanonicalToModel(TestCase):
@given(
canonical_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters),
canonical_to_model_exposure_transformation_file_path=text(min_size=1, alphabet=string.ascii_letters),
canonical_exposure_validation_file_path=text(alphabet=string.ascii_letters),
model_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_is_not_set___parameters_are_taken_from_kwargs(
self,
canonical_exposure_file_path,
canonical_to_model_exposure_transformation_file_path,
canonical_exposure_validation_file_path,
model_exposure_file_path):
trans_call_mock = Mock()
with patch('oasislmf.model_preparation.csv_trans.Translator', Mock(return_value=trans_call_mock)) as trans_mock:
om().transform_canonical_to_model(
canonical_exposure_file_path=canonical_exposure_file_path,
canonical_to_model_exposure_transformation_file_path=canonical_to_model_exposure_transformation_file_path,
model_exposure_file_path=model_exposure_file_path,
)
trans_mock.assert_called_once_with(
os.path.abspath(canonical_exposure_file_path),
os.path.abspath(model_exposure_file_path),
os.path.abspath(canonical_to_model_exposure_transformation_file_path),
xsd_path=None,
append_row_nums=False
)
trans_call_mock.assert_called_once_with()
@given(
canonical_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters),
canonical_to_model_exposure_transformation_file_path=text(min_size=1, alphabet=string.ascii_letters),
canonical_exposure_validation_file_path=text(alphabet=string.ascii_letters),
model_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_is_set___parameters_are_taken_from_model(
self,
canonical_exposure_file_path,
canonical_to_model_exposure_transformation_file_path,
canonical_exposure_validation_file_path,
model_exposure_file_path):
model = fake_model(resources={
'canonical_exposure_validation_file_path': canonical_exposure_validation_file_path,
'canonical_to_model_exposure_transformation_file_path': canonical_to_model_exposure_transformation_file_path,
})
model.resources['oasis_files_pipeline'].canonical_exposure_path = canonical_exposure_file_path
model.resources['oasis_files_pipeline'].model_exposure_file_path = model_exposure_file_path
trans_call_mock = Mock()
with patch('oasislmf.model_preparation.csv_trans.Translator', Mock(return_value=trans_call_mock)) as trans_mock:
om().transform_canonical_to_model(
canonical_exposure_file_path=canonical_exposure_file_path,
canonical_to_model_exposure_transformation_file_path=canonical_to_model_exposure_transformation_file_path,
model_exposure_file_path=model_exposure_file_path,
)
trans_mock.assert_called_once_with(
os.path.abspath(canonical_exposure_file_path),
os.path.abspath(model_exposure_file_path),
os.path.abspath(canonical_to_model_exposure_transformation_file_path),
xsd_path=None,
append_row_nums=False
)
trans_call_mock.assert_called_once_with()
class GetKeys(TestCase):
def create_model(
self,
lookup='lookup',
keys_file_path='key_file_path',
keys_errors_file_path='keys_errors_file_path',
model_exposure_file_path='model_exposure_file_path'
):
model = fake_model(resources={'lookup': lookup})
model.resources['oasis_files_pipeline'].keys_file_path = keys_file_path
model.resources['oasis_files_pipeline'].keys_errors_file_path = keys_errors_file_path
model.resources['oasis_files_pipeline'].model_exposure_file_path = model_exposure_file_path
return model
@given(
lookup=text(min_size=1, alphabet=string.ascii_letters),
keys_file_path=text(min_size=1, alphabet=string.ascii_letters),
keys_errors_file_path=text(min_size=1, alphabet=string.ascii_letters),
exposure_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_is_supplied_kwargs_are_not___lookup_keys_files_and_exposures_file_from_model_are_used(
self,
lookup,
keys_file_path,
keys_errors_file_path,
exposure_file_path
):
model = self.create_model(lookup=lookup, keys_file_path=keys_file_path, keys_errors_file_path=keys_errors_file_path, model_exposure_file_path=exposure_file_path)
with patch('oasislmf.model_preparation.lookup.OasisLookupFactory.save_results', Mock(return_value=(keys_file_path, 1, keys_errors_file_path, 1))) as oklf_mock:
res_keys_file_path, res_keys_errors_file_path = om().get_keys(oasis_model=model)
oklf_mock.assert_called_once_with(
lookup,
keys_file_path,
errors_fp=keys_errors_file_path,
model_exposure_fp=exposure_file_path
)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_file_path, keys_file_path)
self.assertEqual(res_keys_file_path, keys_file_path)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_errors_file_path, keys_errors_file_path)
self.assertEqual(res_keys_errors_file_path, keys_errors_file_path)
@given(
model_lookup=text(min_size=1, alphabet=string.ascii_letters),
model_keys_fp=text(min_size=1, alphabet=string.ascii_letters),
model_keys_errors_fp=text(min_size=1, alphabet=string.ascii_letters),
model_exposure_fp=text(min_size=1, alphabet=string.ascii_letters),
lookup=text(min_size=1, alphabet=string.ascii_letters),
keys_fp=text(min_size=1, alphabet=string.ascii_letters),
keys_errors_fp=text(min_size=1, alphabet=string.ascii_letters),
exposures_fp=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_and_kwargs_are_supplied___lookup_keys_files_and_exposures_file_from_kwargs_are_used(
self,
model_lookup,
model_keys_fp,
model_keys_errors_fp,
model_exposure_fp,
lookup,
keys_fp,
keys_errors_fp,
exposures_fp
):
model = self.create_model(lookup=model_lookup, keys_file_path=model_keys_fp, keys_errors_file_path=model_keys_errors_fp, model_exposure_file_path=model_exposure_fp)
with patch('oasislmf.model_preparation.lookup.OasisLookupFactory.save_results', Mock(return_value=(keys_fp, 1, keys_errors_fp, 1))) as oklf_mock:
res_keys_file_path, res_keys_errors_file_path = om().get_keys(
oasis_model=model,
lookup=lookup,
model_exposure_file_path=exposures_fp,
keys_file_path=keys_fp,
keys_errors_file_path=keys_errors_fp
)
oklf_mock.assert_called_once_with(
lookup,
keys_fp,
errors_fp=keys_errors_fp,
model_exposure_fp=exposures_fp
)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_file_path, keys_fp)
self.assertEqual(res_keys_file_path, keys_fp)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_errors_file_path, keys_errors_fp)
self.assertEqual(res_keys_errors_file_path, keys_errors_fp)
class GetGulInputItems(TestCase):
def setUp(self):
self.profile = copy.deepcopy(canonical_exposure_profile)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=0),
keys=keys(size=2)
)
def test_no_fm_terms_in_canonical_profile__oasis_exception_is_raised(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
        _p = copy.deepcopy(profile)
for _k, _v in iteritems(_p):
for __k, __v in iteritems(_v):
if 'FM' in __k:
profile[_k].pop(__k)
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
with self.assertRaises(OasisException):
om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=0),
keys=keys(size=2)
)
def test_no_canonical_items__oasis_exception_is_raised(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
with self.assertRaises(OasisException):
om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=2),
keys=keys(size=0)
)
def test_no_keys_items__oasis_exception_is_raised(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
with self.assertRaises(OasisException):
om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=2),
keys=keys(from_statuses=just(OASIS_KEYS_STATUS['success']['id']), size=2)
)
def test_canonical_items_dont_match_any_keys_items__oasis_exception_is_raised(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
l = len(exposures)
for key in keys:
key['id'] += l
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
with self.assertRaises(OasisException):
om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=2),
keys=keys(from_statuses=just(OASIS_KEYS_STATUS['success']['id']), size=2)
)
def test_canonical_profile_doesnt_have_any_tiv_fields__oasis_exception_is_raised(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
tivs = [profile[e]['ProfileElementName'] for e in profile if profile[e].get('FMTermType') and profile[e]['FMTermType'].lower() == 'tiv']
for t in tivs:
profile.pop(t)
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
with self.assertRaises(OasisException):
om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(
from_tivs1=just(0.0),
size=2
),
keys=keys(from_statuses=just(OASIS_KEYS_STATUS['success']['id']), size=2)
)
def test_canonical_items_dont_have_any_positive_tivs__oasis_exception_is_raised(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
with self.assertRaises(OasisException):
om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(
from_tivs1=just(1.0),
size=2
),
keys=keys(
from_coverage_type_ids=just(OASIS_COVERAGE_TYPES['buildings']['id']),
from_statuses=just(OASIS_KEYS_STATUS['success']['id']),
size=2
)
)
def test_only_buildings_coverage_type_in_exposure_and_model_lookup_supporting_single_peril_and_buildings_coverage_type__gul_items_are_generated(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
ufcp = unified_canonical_fm_profile_by_level_and_term_group(profiles=(profile,))
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
matching_canonical_and_keys_item_ids = set(k['id'] for k in keys).intersection([e['row_id'] for e in exposures])
gul_items_df, canexp_df = om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
get_canonical_item = lambda i: (
[e for e in exposures if e['row_id'] == i + 1][0] if len([e for e in exposures if e['row_id'] == i + 1]) == 1
else None
)
get_keys_item = lambda i: (
[k for k in keys if k['id'] == i + 1][0] if len([k for k in keys if k['id'] == i + 1]) == 1
else None
)
tiv_elements = (ufcp[1][1]['tiv'],)
fm_terms = {
1: {
'deductible': 'wscv1ded',
'deductible_min': None,
'deductible_max': None,
'limit': 'wscv1limit',
'share': None
}
}
for i, gul_it in enumerate(gul_items_df.T.to_dict().values()):
can_it = get_canonical_item(int(gul_it['canexp_id']))
self.assertIsNotNone(can_it)
keys_it = get_keys_item(int(gul_it['canexp_id']))
self.assertIsNotNone(keys_it)
positive_tiv_elements = [
t for t in tiv_elements if can_it.get(t['ProfileElementName'].lower()) and can_it[t['ProfileElementName'].lower()] > 0 and t['CoverageTypeID'] == keys_it['coverage_type']
]
for _, t in enumerate(positive_tiv_elements):
tiv_elm = t['ProfileElementName'].lower()
self.assertEqual(tiv_elm, gul_it['tiv_elm'])
tiv_tgid = t['FMTermGroupID']
self.assertEqual(can_it[tiv_elm], gul_it['tiv'])
ded_elm = fm_terms[tiv_tgid].get('deductible')
self.assertEqual(ded_elm, gul_it['ded_elm'])
ded_min_elm = fm_terms[tiv_tgid].get('deductible_min')
self.assertEqual(ded_min_elm, gul_it['ded_min_elm'])
ded_max_elm = fm_terms[tiv_tgid].get('deductible_max')
self.assertEqual(ded_max_elm, gul_it['ded_max_elm'])
lim_elm = fm_terms[tiv_tgid].get('limit')
self.assertEqual(lim_elm, gul_it['lim_elm'])
shr_elm = fm_terms[tiv_tgid].get('share')
self.assertEqual(shr_elm, gul_it['shr_elm'])
self.assertEqual(keys_it['area_peril_id'], gul_it['areaperil_id'])
self.assertEqual(keys_it['vulnerability_id'], gul_it['vulnerability_id'])
self.assertEqual(i + 1, gul_it['item_id'])
self.assertEqual(i + 1, gul_it['coverage_id'])
self.assertEqual(can_it['row_id'], gul_it['group_id'])
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(
from_tivs1=floats(min_value=1.0, allow_infinity=False),
from_tivs2=floats(min_value=2.0, allow_infinity=False),
from_tivs3=floats(min_value=3.0, allow_infinity=False),
from_tivs4=floats(min_value=4.0, allow_infinity=False),
size=2
),
keys=keys(
from_peril_ids=just(OASIS_PERILS['wind']['id']),
from_coverage_type_ids=just(OASIS_COVERAGE_TYPES['buildings']['id']),
from_statuses=just(OASIS_KEYS_STATUS['success']['id']),
size=8
)
)
def test_all_coverage_types_in_exposure_and_model_lookup_supporting_multiple_perils_but_only_buildings_and_other_structures_coverage_types__gul_items_are_generated(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
ufcp = unified_canonical_fm_profile_by_level_and_term_group(profiles=(profile,))
exposures[1]['wscv2val'] = exposures[1]['wscv3val'] = exposures[1]['wscv4val'] = 0.0
keys[1]['id'] = keys[2]['id'] = keys[3]['id'] = 1
keys[2]['peril_id'] = keys[3]['peril_id'] = OASIS_PERILS['quake']['id']
keys[1]['coverage_type'] = keys[3]['coverage_type'] = OASIS_COVERAGE_TYPES['other']['id']
keys[4]['id'] = keys[5]['id'] = keys[6]['id'] = keys[7]['id'] = 2
keys[6]['peril_id'] = keys[7]['peril_id'] = OASIS_PERILS['quake']['id']
keys[5]['coverage_type'] = keys[7]['coverage_type'] = OASIS_COVERAGE_TYPES['other']['id']
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
matching_canonical_and_keys_item_ids = set(k['id'] for k in keys).intersection([e['row_id'] for e in exposures])
gul_items_df, canexp_df = om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
self.assertEqual(len(gul_items_df), 6)
self.assertEqual(len(canexp_df), 2)
tiv_elements = (ufcp[1][1]['tiv'], ufcp[1][2]['tiv'])
fm_terms = {
1: {
'deductible': 'wscv1ded',
'deductible_min': None,
'deductible_max': None,
'limit': 'wscv1limit',
'share': None
},
2: {
'deductible': 'wscv2ded',
'deductible_min': None,
'deductible_max': None,
'limit': 'wscv2limit',
'share': None
}
}
for i, gul_it in enumerate(gul_items_df.T.to_dict().values()):
can_it = canexp_df.iloc[gul_it['canexp_id']].to_dict()
keys_it = [k for k in keys if k['id'] == gul_it['canexp_id'] + 1 and k['peril_id'] == gul_it['peril_id'] and k['coverage_type'] == gul_it['coverage_type_id']][0]
positive_tiv_term = [t for t in tiv_elements if can_it.get(t['ProfileElementName'].lower()) and can_it[t['ProfileElementName'].lower()] > 0 and t['CoverageTypeID'] == keys_it['coverage_type']][0]
tiv_elm = positive_tiv_term['ProfileElementName'].lower()
self.assertEqual(tiv_elm, gul_it['tiv_elm'])
tiv_tgid = positive_tiv_term['FMTermGroupID']
self.assertEqual(can_it[tiv_elm], gul_it['tiv'])
ded_elm = fm_terms[tiv_tgid].get('deductible')
self.assertEqual(ded_elm, gul_it['ded_elm'])
ded_min_elm = fm_terms[tiv_tgid].get('deductible_min')
self.assertEqual(ded_min_elm, gul_it['ded_min_elm'])
ded_max_elm = fm_terms[tiv_tgid].get('deductible_max')
self.assertEqual(ded_max_elm, gul_it['ded_max_elm'])
lim_elm = fm_terms[tiv_tgid].get('limit')
self.assertEqual(lim_elm, gul_it['lim_elm'])
shr_elm = fm_terms[tiv_tgid].get('share')
self.assertEqual(shr_elm, gul_it['shr_elm'])
self.assertEqual(keys_it['area_peril_id'], gul_it['areaperil_id'])
self.assertEqual(keys_it['vulnerability_id'], gul_it['vulnerability_id'])
self.assertEqual(i + 1, gul_it['item_id'])
self.assertEqual(i + 1, gul_it['coverage_id'])
self.assertEqual(can_it['row_id'], gul_it['group_id'])
class GetFmInputItems(TestCase):
def setUp(self):
self.exposures_profile = copy.deepcopy(canonical_exposure_profile)
self.accounts_profile = copy.deepcopy(canonical_accounts_profile)
self.unified_canonical_profile = unified_canonical_fm_profile_by_level_and_term_group(
profiles=[self.exposures_profile, self.accounts_profile]
)
self.fm_agg_profile = copy.deepcopy(oasis_fm_agg_profile)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=2),
accounts=canonical_accounts(size=1),
guls=gul_input_items(size=2)
)
def test_no_fm_terms_in_canonical_profiles__oasis_exception_is_raised(
self,
exposures,
accounts,
guls
):
cep = copy.deepcopy(self.exposures_profile)
cap = copy.deepcopy(self.accounts_profile)
        _cep = copy.deepcopy(cep)
        _cap = copy.deepcopy(cap)
for _k, _v in iteritems(_cep):
for __k, __v in iteritems(_v):
if 'FM' in __k:
cep[_k].pop(__k)
for _k, _v in iteritems(_cap):
for __k, __v in iteritems(_v):
if 'FM' in __k:
cap[_k].pop(__k)
with NamedTemporaryFile('w') as accounts_file:
write_canonical_files(accounts, accounts_file.name)
with self.assertRaises(OasisException):
fm_df, canacc_df = om().get_fm_input_items(
pd.DataFrame(data=exposures),
pd.DataFrame(data=guls),
cep,
cap,
accounts_file.name,
self.fm_agg_profile
)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=2),
guls=gul_input_items(size=2)
)
def test_no_aggregation_profile__oasis_exception_is_raised(
self,
exposures,
guls
):
cep = copy.deepcopy(self.exposures_profile)
cap = copy.deepcopy(self.accounts_profile)
fmap = {}
with NamedTemporaryFile('w') as accounts_file:
write_canonical_files(canonical_accounts=[], canonical_accounts_file_path=accounts_file.name)
with self.assertRaises(OasisException):
fm_df, canacc_df = om().get_fm_input_items(
pd.DataFrame(data=exposures),
pd.DataFrame(data=guls),
cep,
cap,
accounts_file.name,
fmap
)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=2),
guls=gul_input_items(size=2)
)
def test_no_canonical_accounts_items__oasis_exception_is_raised(
self,
exposures,
guls
):
cep = copy.deepcopy(self.exposures_profile)
cap = copy.deepcopy(self.accounts_profile)
fmap = copy.deepcopy(self.fm_agg_profile)
with NamedTemporaryFile('w') as accounts_file:
write_canonical_files(canonical_accounts=[], canonical_accounts_file_path=accounts_file.name)
with self.assertRaises(OasisException):
fm_df, canacc_df = om().get_fm_input_items(
pd.DataFrame(data=exposures),
|
pd.DataFrame(data=guls)
|
pandas.DataFrame
|
import itertools
import operator
from os.path import dirname, join
import numpy as np
import pandas as pd
import pytest
from pandas.core import ops
from pandas.tests.extension import base
from pandas.tests.extension.conftest import ( # noqa: F401
as_array,
as_frame,
as_series,
fillna_method,
groupby_apply_op,
use_numpy,
)
from pint.errors import DimensionalityError
from pint.testsuite import QuantityTestCase, helpers
import pint_pandas as ppi
from pint_pandas import PintArray
ureg = ppi.PintType.ureg
@pytest.fixture(params=[True, False])
def box_in_series(request):
"""Whether to box the data in a Series"""
return request.param
@pytest.fixture
def dtype():
return ppi.PintType("pint[meter]")
@pytest.fixture
def data():
return ppi.PintArray.from_1darray_quantity(
np.arange(start=1.0, stop=101.0) * ureg.nm
)
@pytest.fixture
def data_missing():
return ppi.PintArray.from_1darray_quantity([np.nan, 1] * ureg.meter)
@pytest.fixture
def data_for_twos():
x = [
2.0,
] * 100
return ppi.PintArray.from_1darray_quantity(x * ureg.meter)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
@pytest.fixture
def data_repeated(data):
"""Return different versions of data for count times"""
    # mirrors the data_repeated fixture used in pandas' own extension tests:
    # https://github.com/pandas-dev/pandas/blob/master/pandas/tests/extension/integer/test_integer.py
def gen(count):
for _ in range(count):
yield data
yield gen
@pytest.fixture(params=[None, lambda x: x])
def sort_by_key(request):
"""
Simple fixture for testing keys in sorting methods.
Tests None (no key) and the identity key.
"""
return request.param
@pytest.fixture
def data_for_sorting():
return ppi.PintArray.from_1darray_quantity([0.3, 10, -50] * ureg.centimeter)
# should probably get more sophisticated and do something like
# [1 * ureg.meter, 3 * ureg.meter, 10 * ureg.centimeter]
@pytest.fixture
def data_missing_for_sorting():
return ppi.PintArray.from_1darray_quantity([4, np.nan, -5] * ureg.centimeter)
# should probably get more sophisticated and do something like
# [4 * ureg.meter, np.nan, 10 * ureg.centimeter]
@pytest.fixture
def na_cmp():
"""Binary operator for comparing NA values."""
return lambda x, y: bool(np.isnan(x.magnitude)) & bool(np.isnan(y.magnitude))
@pytest.fixture
def na_value():
return ppi.PintType("meter").na_value
@pytest.fixture
def data_for_grouping():
# should probably get more sophisticated here and use units on all these
# quantities
a = 1.0
b = 2.0 ** 32 + 1
c = 2.0 ** 32 + 10
return ppi.PintArray.from_1darray_quantity(
[b, b, np.nan, np.nan, a, a, b, c] * ureg.m
)
# === fixtures required by the shared extension tests but not listed in the pandas extension docs ===
# copied from pandas/pandas/conftest.py
_all_arithmetic_operators = [
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__floordiv__",
"__rfloordiv__",
"__truediv__",
"__rtruediv__",
"__pow__",
"__rpow__",
"__mod__",
"__rmod__",
]
@pytest.fixture(params=_all_arithmetic_operators)
def all_arithmetic_operators(request):
"""
Fixture for dunder names for common arithmetic operations
"""
return request.param
@pytest.fixture(params=["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"])
def all_compare_operators(request):
"""
Fixture for dunder names for common compare operations
* >=
* >
* ==
* !=
* <
* <=
"""
return request.param
# commented functions aren't implemented
_all_numeric_reductions = [
"sum",
"max",
"min",
"mean",
# "prod",
# "std",
# "var",
"median",
# "kurt",
# "skew",
]
@pytest.fixture(params=_all_numeric_reductions)
def all_numeric_reductions(request):
"""
Fixture for numeric reduction names.
"""
return request.param
_all_boolean_reductions = ["all", "any"]
@pytest.fixture(params=_all_boolean_reductions)
def all_boolean_reductions(request):
"""
Fixture for boolean reduction names.
"""
return request.param
# =================================================================
class TestCasting(base.BaseCastingTests):
pass
class TestConstructors(base.BaseConstructorsTests):
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_series_constructor_no_data_with_index(self, dtype, na_value):
result = pd.Series(index=[1, 2, 3], dtype=dtype)
expected = pd.Series([na_value] * 3, index=[1, 2, 3], dtype=dtype)
self.assert_series_equal(result, expected)
# GH 33559 - empty index
result = pd.Series(index=[], dtype=dtype)
expected = pd.Series([], index=pd.Index([], dtype="object"), dtype=dtype)
self.assert_series_equal(result, expected)
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_series_constructor_scalar_na_with_index(self, dtype, na_value):
result = pd.Series(na_value, index=[1, 2, 3], dtype=dtype)
expected = pd.Series([na_value] * 3, index=[1, 2, 3], dtype=dtype)
self.assert_series_equal(result, expected)
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_series_constructor_scalar_with_index(self, data, dtype):
scalar = data[0]
result = pd.Series(scalar, index=[1, 2, 3], dtype=dtype)
expected = pd.Series([scalar] * 3, index=[1, 2, 3], dtype=dtype)
self.assert_series_equal(result, expected)
result = pd.Series(scalar, index=["foo"], dtype=dtype)
expected = pd.Series([scalar], index=["foo"], dtype=dtype)
self.assert_series_equal(result, expected)
class TestDtype(base.BaseDtypeTests):
pass
class TestGetitem(base.BaseGetitemTests):
def test_getitem_mask_raises(self, data):
mask = np.array([True, False])
msg = f"Boolean index has wrong length: 2 instead of {len(data)}"
with pytest.raises(IndexError, match=msg):
data[mask]
mask =
|
pd.array(mask, dtype="boolean")
|
pandas.array
|
#!/usr/bin/env python
# coding: utf-8
# # Complaints filed with TCM-BA
#
#
# How long, on average, does a complaint take to be judged by the
# Tribunal de Contas dos Municípios (TCM-BA)?
#
# That is the question this notebook sets out to answer.
#
# ---
#
# The data was collected with scripts from the
# [tcm-ba](https://github.com/DadosAbertosDeFeira/tcm-ba) repository on 17 July 2021.
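# Illustrative sketch of the calculation this notebook is building towards (the name of the
# judgment-date column, 'judged_at', is a placeholder here, not necessarily the real column
# in processos-tcm-ba.csv):
#
#     import pandas as pd
#     df = pd.read_csv("processos-tcm-ba.csv")
#     df["entry_at"] = pd.to_datetime(df["entry_at"], format="%d/%m/%Y")
#     df["judged_at"] = pd.to_datetime(df["judged_at"], format="%d/%m/%Y")  # hypothetical column
#     (df["judged_at"] - df["entry_at"]).mean()  # average time from filing to judgment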
# In[17]:
import pandas as pd
import seaborn as sns
df = pd.read_csv("processos-tcm-ba.csv")
# In[19]:
df.sample(3)
# In[21]:
df.shape
# In[6]:
df["nature"].unique()
# In[7]:
df["entry_at"] =
|
pd.to_datetime(df["entry_at"], format="%d/%m/%Y")
|
pandas.to_datetime
|
import pandas as pd
import numpy as np
import datetime
from modeling import get_target_cols
from modeling import lgbm_params, num_boost_round
import lightgbm as lgb
class Prediction(object):
@classmethod
def get_pred_df(cls, X_TR, X_TE):
target_cols = get_target_cols()
# full training
Q_TR = X_TR[['session_id', 'gid']].groupby('session_id').count().reset_index()
Q_TR = Q_TR.rename(columns={'gid': 'query'})
lgb_train = lgb.Dataset(X_TR[target_cols], X_TR["clicked"], group=Q_TR['query'])
model = lgb.train(lgbm_params
, lgb_train
, num_boost_round=num_boost_round
, verbose_eval=1)
# prediction
y_pred = model.predict(X_TE[target_cols], num_iteration=model.best_iteration)
y_pred_df = pd.DataFrame({"gid": X_TE["gid"].values
, "impression": X_TE["impression"].values
, "prob": y_pred})
return y_pred_df
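# Illustrative note on the `group` argument used in get_pred_df above (a sketch, not part of
# the original pipeline): a LightGBM ranking dataset takes one row count per query, in the
# same order as the training rows, so X_TR is assumed to be sorted by session_id.
#
#     import pandas as pd
#     X = pd.DataFrame({'session_id': [1, 1, 2, 2, 2], 'gid': [10, 11, 20, 21, 22]})
#     group_sizes = X.groupby('session_id').size()   # -> [2, 3], one entry per session/query
#     assert group_sizes.sum() == len(X)             # every row belongs to exactly one group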
class Submission(object):
@classmethod
def get_sub_df(cls, y_pred_df, IDCOLS_DF, submission_df):
# rank normalization
y_pred_df["prob"] = y_pred_df["prob"].rank(ascending=True)
y_pred_df["prob"] = y_pred_df["prob"].astype(int)
# create submission
y_pred_df["impression"] = y_pred_df["impression"].astype(str)
def create_sub(x):
impressions = list(x.impression)
probs = list(x.prob)
imps_probs = [[i, p] for i, p in zip(impressions, probs)]
imps_probs.sort(key=lambda z: z[1])
imps = [imps_prob[0] for imps_prob in imps_probs]
return " ".join(imps)
mysub_df = y_pred_df.groupby("gid").apply(lambda x: create_sub(x)).reset_index()
mysub_df = pd.merge(mysub_df, IDCOLS_DF, on="gid", how="left")
mysub_df.columns = ["gid", "item_recommendations", "user_id", "session_id", "step"]
mysub_df = mysub_df[["user_id", "session_id", "step", "item_recommendations"]]
mysub_df.columns = ["user_id", "session_id", "step", "item_recommendations_sub"]
sub_df =
|
pd.merge(submission_df, mysub_df, on=["user_id", "session_id", "step"], how="left")
|
pandas.merge
|
import datetime as dt
import json
from urllib import request
import pandas as pd
NOAA_URL = 'https://services.swpc.noaa.gov/json/solar-cycle/observed-solar-cycle-indices.json'
def get_solar_data(url):
"""Download the most recent solar data
"""
response = request.urlopen(url)
if response.status == 200:
data = json.loads(response.read())
    else:
        # no valid payload to build a frame from, so fail loudly instead of crashing later
        raise RuntimeError(f'Invalid response! HTTP Status Code: {response.status}')
df = pd.DataFrame(data)
dates = [dt.datetime.strptime(val, '%Y-%m') for val in df['time-tag']]
df.index =
|
pd.DatetimeIndex(dates)
|
pandas.DatetimeIndex
|
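# A small self-contained sketch of the DatetimeIndex construction used in get_solar_data
# above (the '%Y-%m' time-tag format is taken from that function; the sample values here
# are made up):
#
#     import datetime as dt
#     import pandas as pd
#     tags = ['2020-01', '2020-02', '2020-03']
#     dates = [dt.datetime.strptime(t, '%Y-%m') for t in tags]
#     idx = pd.DatetimeIndex(dates)   # monthly timestamps for indexing the solar-cycle frame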
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 13 08:33:48 2018
@author: macbook2
"""
import pandas as pd
import numpy as np
from abc import ABC, abstractmethod
from ib_insync import Index, Option
from option_utilities import time_it, USSimpleYieldCurve, get_theoretical_strike
from spx_data_update import DividendYieldHistory, IbWrapper
from ib_insync.util import isNan
class OptionAsset(ABC):
def __init__(self, mkt_symbol, vol_symbol, exchange_dict):
"""Abstract class for option asset container"""
exchange_mkt = exchange_dict['exchange_mkt']
exchange_vol = exchange_dict['exchange_vol']
exchange_opt = exchange_dict['exchange_opt']
self.trading_class = exchange_dict['trading_class']
underlying_index = Index(mkt_symbol, exchange_mkt)
ibw = IbWrapper()
ib = ibw.ib
self.underlying_qc = self.__get_underlying_qc(underlying_index, ib)
self.sigma_qc = self.get_sigma_qc(vol_symbol, ib, exchange_vol)
self.chain = self.get_option_chain(underlying_index, ib, exchange_opt)
ib.disconnect()
""""" Abstract option asset container - Each underlying instrument is an instance of the OptionAsset class
and each instance is the only argument for the option_market Class. """
@staticmethod
def __get_underlying_qc(underlying_index, ib):
"""Retrieve IB qualifying contracts for an index"""
index_qc = ib.qualifyContracts(underlying_index)
assert(len(index_qc) == 1)
return index_qc[0]
@property
def get_expirations(self):
"""Retrieve Dataframe of option expirations (last trading day) for option chain in object"""
expirations = pd.DataFrame(list(self.chain.expirations),
index=pd.DatetimeIndex(self.chain.expirations),
columns=['expirations'])
        timedelta = expirations.index - pd.Timestamp.today()
expirations['year_fraction'] = timedelta.days / 365
# remove negative when latest expiry is today
expirations = expirations[expirations['year_fraction'] > 0]
return expirations.sort_index()
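    # Worked example of the year_fraction computed in get_expirations above (a sketch with
    # made-up expiry strings, not live IB data): the fraction is calendar days to expiry
    # divided by 365.
    #
    #     import pandas as pd
    #     expiries = pd.DatetimeIndex(['20300117', '20300620'])
    #     days_left = (expiries - pd.Timestamp.today()).days
    #     year_fraction = days_left / 365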
@abstractmethod
def get_option_chain(self, underlying_index, ib, exchange):
"""Abstract method"""
#
pass
@abstractmethod
def get_sigma_qc(self, vol_symbol, ib, exchange):
"""Abstract method"""
# should return empty string if no pre-calculated vol index exists
pass
@staticmethod
@abstractmethod
def get_dividend_yield():
"""Abstract method - Gets latest dividend yield"""
# should return empty string if no pre-calculated vol index exists
pass
class SpxOptionAsset(OptionAsset):
def __init__(self, trading_class='SPX'):
""""" Asset container for SPX - S&P 500 Index. """
mkt_symbol = 'SPX'
vol_symbol = 'VIX'
exchange_dict = {'exchange_mkt': 'CBOE', 'exchange_vol': 'CBOE', 'exchange_opt': 'CBOE',
'trading_class': trading_class} # other choice is SPXW
super().__init__(mkt_symbol, vol_symbol, exchange_dict)
if trading_class == 'SPXW':
self.settlement_PM = True
else:
self.settlement_PM = False
def get_sigma_qc(self, vol_symbol, ib, exchange):
"""Returns implied Volatility for market"""
sigma_index = Index(vol_symbol, exchange)
sigma_qc = ib.qualifyContracts(sigma_index)
assert(len(sigma_qc) == 1)
return sigma_qc[0]
def get_option_chain(self, underlying_index, ib, exchange):
"""Retrieve IB qualifying options contracts for an index"""
all_chains = ib.reqSecDefOptParams(underlying_index.symbol, '',
underlying_index.secType,
underlying_index.conId)
# TO DO Consider moving this to abstract function as different markets will have different
# conditions around which options to select
chain = next(c for c in all_chains if c.tradingClass == self.trading_class and c.exchange == exchange)
return chain
@staticmethod
def get_dividend_yield():
"""Gets latest dividend yield"""
# TO DO: Add check on date of latest dividend yield
dividend_yield_history = DividendYieldHistory()
dividend_yield = dividend_yield_history.dy_monthly[dividend_yield_history.dy_monthly.columns[0]][-1] / 100
return dividend_yield
class RSL2OptionAsset(OptionAsset):
def __init__(self):
mkt_symbol = 'RUT'
vol_symbol = 'RVX'
exchange_dict = {'exchange_mkt': 'RUSSELL', 'exchange_vol': 'CBOE', 'exchange_opt': 'CBOE'}
super().__init__(mkt_symbol, vol_symbol, exchange_dict)
def get_sigma_qc(self, vol_symbol, ib, exchange):
"""Returns implied Volatility for market"""
sigma_index = Index(vol_symbol, exchange)
sigma_qc = ib.qualifyContracts(sigma_index)
assert(len(sigma_qc) == 1)
return sigma_qc[0]
@staticmethod
def get_option_chain(underlying_index, ib, exchange):
"""Retrieve IB qualifying options contracts for an index"""
all_chains = ib.reqSecDefOptParams(underlying_index.symbol, '',
underlying_index.secType,
underlying_index.conId)
# TO DO Consider moving this to abstract function as different markets will have different
# conditions around which options to select
chain = next(c for c in all_chains if c.tradingClass == underlying_index.symbol and c.exchange == exchange)
return chain
@staticmethod
def get_dividend_yield():
"""Gets latest dividend yield"""
# TO DO: Add check on date of latest dividend yield
# TO DO: Change to RSL2 dividend yield
# dividend_yield_history = DividendYieldHistory()
# dividend_yield = dividend_yield_history.dy_monthly[-1] / 100
print('Warning: RSL2 Using Fixed Dividend yield')
dividend_yield = 0.0134
return dividend_yield
#
# class _emfOptionAsset(OptionAsset):
# def __init__(self, mkt_symbol='MXEF', vol_symbol='VXEEM', exchange=('CBOE', 'CBOE'), \
# currency='USD', multiplier='100', sec_type='IND'):
# super().__init__(mkt_symbol, vol_symbol, exchange, \
# currency, multiplier, sec_type)
# self.listing_spread = 10
#
# @staticmethod
# def get_option_implied_dividend_yld():
# """Returns latest dividend yield for market"""
# url = 'http://www.wsj.com/mdc/public/page/2_3021-peyield.html'
# # Package the request, send the request and catch the response: r
# raw_html_tbl = pd.read_html(url)
# dy_df = raw_html_tbl[2]
# latest_dividend_yield = float(dy_df.iloc[2, 4]) /100
# return latest_dividend_yield
class TradeChoice:
def __init__(self, tickers, mkt_prices, account_value, z_score, yield_curve, trade_date, option_expiry):
self.tickers = tickers
self.spot = mkt_prices[0]
self.sigma = mkt_prices[1]
self.account_value = account_value
self.z_score = z_score
# last_trade_dates = [item.contract.lastTradeDateOrContractMonth for item in self.tickers]
# unique_last_trade_dates = pd.to_datetime(list(dict.fromkeys(last_trade_dates)))
self.expirations = option_expiry
self.yield_curve = yield_curve
self.trade_date = trade_date
@property
def strike_grid(self):
strikes = [item.contract.strike for item in self.tickers]
strike_array = np.array(strikes).astype(int).reshape(len(self.expirations),
len(strikes) // len(self.expirations))
df_out = pd.DataFrame(strike_array, index=self.expirations, columns=self.z_score)
df_out = self._format_index(df_out)
return df_out
@property
def premium_grid(self):
premium_mid = [item.marketPrice() for item in self.tickers]
premium_mid = np.round(premium_mid, 2)
premium_mid = premium_mid.reshape(len(self.expirations),
len(premium_mid) // len(self.expirations))
df_out = pd.DataFrame(premium_mid, index=self.expirations, columns=self.z_score)
df_out = self._format_index(df_out)
return df_out
@property
def prices_grid(self):
bid, ask = zip(*[(item.bid, item.ask) for item in self.tickers])
list_val = [np.array(item).reshape((len(self.expirations),
len(item) // len(self.expirations))) for item in [bid, ask]]
df_lst = [pd.DataFrame(item, index=self.expirations, columns=self.z_score) for item in list_val]
df_out = df_lst[0].astype(str) + '/' + df_lst[1].astype(str)
df_out = self._format_index(df_out)
return df_out
def pct_otm_grid(self, last_price):
df_out = self.strike_grid / last_price - 1
return df_out
def option_lots(self, leverage, capital_at_risk):
risk_free = self.yield_curve.get_zero4_date(self.expirations.date) / 100
option_life = np.array([timeDelta.days / 365 for timeDelta in
[expiryDate - self.trade_date for expiryDate in self.expirations]])
strike_discount = np.exp(- risk_free.mul(option_life))
strike_discount = strike_discount.squeeze() # convert to series
notional_capital = self.strike_grid.mul(strike_discount, axis=0) - self.premium_grid
contract_lots = [round(capital_at_risk / (notional_capital.copy() / num_leverage * 100), 0)
for num_leverage in leverage]
for counter, df in enumerate(contract_lots):
df.index.name = 'Lev %i' % leverage[counter]
contract_lots = [df.apply(pd.to_numeric, downcast='integer') for df in contract_lots]
return contract_lots
def margin(self, last_price):
# 100% of premium + 20% spot price - (spot-strike)
otm_margin = last_price - self.strike_grid
otm_margin[otm_margin < 0] = 0
single_margin_a = (self.premium_grid + 0.2 * last_price) - (last_price - self.strike_grid)
# 100% of premium + 10% * strike
single_margin_b = self.premium_grid + 0.1 * self.strike_grid
margin = pd.concat([single_margin_a, single_margin_b]).max(level=0)
margin = margin * int(self.tickers[0].contract.multiplier)
return margin
@staticmethod
def _format_index(df_in):
df_out = df_in.set_index(df_in.index.tz_localize(None).normalize())
return df_out
class OptionMarket:
"""IB Interface class that fetches data from IB to pass to trade choice object
Args:
param1 (OptionAsset): Option asset that contains description of underlying asset.
"""
def __init__(self, opt_asset: OptionAsset):
self.option_asset = opt_asset
self.trade_date = pd.DatetimeIndex([pd.datetime.today()])
import glob
import os
import pandas as pd
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from typing import Dict, List, Union, Tuple
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler
from networks.classes.centernet.datasets.ClassificationDataset import ClassificationDataset
class ModelCenterNet:
def __init__(self, logs: Dict):
self.__logs = logs
self.__input_width: int = None
self.__input_height: int = None
def build_model(self,
model_generator,
input_shape: Tuple[int, int, int], mode: str,
n_category: int = 1) -> tf.keras.Model:
"""
Builds the network.
:param model_generator: a generator for the network
:param input_shape: the shape of the input images
:param mode: the type of model that must be generated
:param n_category: the number of categories (possible classes). Defaults to 1 in order to detect the
presence or absence of an object only (and not its label).
:return: a Keras model
"""
self.__input_width = input_shape[0]
self.__input_height = input_shape[1]
self.__logs['execution'].info('Building {} model...'.format(mode))
return model_generator.generate_model(input_shape, mode, n_category)
@staticmethod
def setup_callbacks(weights_log_path: str, batch_size: int, lr: float) -> List[
tf.keras.callbacks.Callback]:
"""
Sets up the callbacks for the training of the model.
"""
# Setup callback to save the best weights after each epoch
checkpointer = ModelCheckpoint(filepath=os.path.join(weights_log_path,
'weights.{epoch:02d}-{val_loss:.2f}.hdf5'),
verbose=0,
save_best_only=True,
save_weights_only=True,
monitor='val_loss',
mode='min')
tensorboard_log_dir = os.path.join(weights_log_path, 'tensorboard')
# Note that update_freq is set to batch_size * 10,
# because the epoch takes too long and batch size too short
tensorboard = TensorBoard(log_dir=tensorboard_log_dir,
write_graph=True,
histogram_freq=0,
write_grads=True,
write_images=False,
batch_size=batch_size,
update_freq=batch_size * 10)
def lrs(epoch):
if epoch > 10:
return lr / 10
elif epoch > 6:
return lr / 5
else:
return lr
lr_schedule = LearningRateScheduler(lrs, verbose=1)
return [tensorboard, checkpointer, lr_schedule]
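# Sketch of the schedule defined in lrs above with an assumed base rate lr = 1e-3:
#   epochs 0-6   -> 1e-3   (lr)
#   epochs 7-10  -> 2e-4   (lr / 5)
#   epochs 11+   -> 1e-4   (lr / 10)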
def restore_weights(self,
model: tf.keras.Model,
init_epoch: int,
weights_folder_path: str) -> None:
"""
Restores the weights from an existing weights file
:param model:
:param init_epoch:
:param weights_folder_path:
"""
init_epoch_str = '0' + str(init_epoch) if init_epoch < 10 else str(init_epoch)
restore_path_reg = os.path.join(weights_folder_path, 'weights.{}-*.hdf5'.format(init_epoch_str))
list_files = glob.glob(restore_path_reg)
assert len(list_files) > 0, \
'ERR: No weights file match provided name {}'.format(restore_path_reg)
# Take real filename
restore_filename = list_files[0].split('/')[-1]
restore_path = os.path.join(weights_folder_path, restore_filename)
assert os.path.isfile(restore_path), \
'ERR: Weight file in path {} seems not to be a file'.format(restore_path)
self.__logs['execution'].info("Restoring weights in file {}...".format(restore_filename))
model.load_weights(restore_path)
def train(self,
dataset: Union[tf.data.Dataset, ClassificationDataset],
model: tf.keras.Model,
init_epoch: int,
epochs: int,
batch_size: int,
callbacks: List[tf.keras.callbacks.Callback],
class_weights=None,
augmentation: bool = False):
"""
Compiles and trains the model for the specified number of epochs.
"""
self.__logs['training'].info('Training the model...\n')
# Display the architecture of the model
self.__logs['training'].info('Architecture of the model:')
model.summary()
# Train the model
self.__logs['training'].info('Starting the fitting procedure:')
self.__logs['training'].info('* Total number of epochs: ' + str(epochs))
self.__logs['training'].info('* Initial epoch: ' + str(init_epoch) + '\n')
training_set, training_set_size = dataset.get_training_set()
validation_set, validation_set_size = dataset.get_validation_set()
training_steps = training_set_size // batch_size + 1
validation_steps = validation_set_size // batch_size + 1
if augmentation:
x_train, y_train = dataset.get_xy_training()
x_val, y_val = dataset.get_xy_validation()
train_image_data_generator = ImageDataGenerator(brightness_range=[0.7, 1.0],
rotation_range=10,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=.1)
val_image_data_generator = ImageDataGenerator()
train_generator = train_image_data_generator.flow_from_dataframe(
dataframe=pd.DataFrame({'image': x_train, 'class': y_train})
import datetime
import dateutil.parser
import re
import pandas as pd
from robot.api import logger
from faker import Faker
import random
import generic
functions_list = ["$SSN$", '$date$', '$datetime$', '$GS1$']
def get_global_test_data_to_dictionary(file_path, colname, csv_file_delimiter, file_names):
'''
Author : <NAME>
Description : to get data from global data csv
:param file_path: path of CSV file
:param colname: name of column
:param csv_file_delimiter: delimiter used in CSV file
:return: dictionary
'''
datadict = {}
for file in file_names.split(','):
all_dictionary = generic.csv_to_dictionary(file_path+'\\'+file, csv_file_delimiter)
# datadict = update_value(all_dictionary[colname], file_path, csv_file_delimiter)
datadict.update(all_dictionary[colname])
#print("Before RUN Global dictionary : ", datadict)
logger.debug('<B>Before RUN Global Data dictionary : </B>'+str(datadict))
return datadict
def get_test_data_to_dictionary(file_path, csv_file_delimiter):
"""
Author : <NAME>
Description : to get data from test data csv
:param file_path: path of file without file extension
:param csv_file_delimiter: delimiter for csv file
:return: dictionary
"""
datadict = {}
datadict = generic.csv_to_dictionary(file_path + ".csv", csv_file_delimiter)
logger.debug('<B>Before RUN Test Data dictionary : </B>' + str(datadict))
return datadict
def update_dictionary(global_dictionary, data_dictionary , file_path, csv_file_delimiter, stritr=''):
"""
Author : <NAME>
Description : update values in global dictionary
:param global_dictionary:
:param data_dictionary:
:param file_path:
:param csv_file_delimiter:
:param stritr:
:return: dictionary object
"""
datadict = global_dictionary
for key in datadict:
text = datadict[key]
cell_value = text
text = str(text)
logger.debug(key+' : '+text)
#logger.debug(text[0:3].upper())
if text[0:3].upper() == "ITR":
cell_value = data_dictionary[cell_value][key]
logger.debug("Updated Test Dictionary Node : " + "[" + key + "] : " + str(cell_value))
if text.startswith(r'[') and text.endswith(r']'):
text = text[1:-1]
values = text.split(sep=',')
print(values)
for i, item in enumerate(values):
# values[i] = item.strip()
if '%' in values[i]:
#values[i] = item[1:-1]
#values[i] = datadict[values[i]]
key_name = re.findall(r'%.*%', values[i])[0][1:-1]
if key_name in datadict.keys():
values[i] = re.sub(r'%'+key_name+'%', str(datadict[key_name]), values[i])
else:
pass
#values[i] = updated_value
elif values[i][0:3].upper() == "ITR":
values[i] = data_dictionary[values[i]][key]
else:
pass
if len(values) > 1:
if values[1].lower() == '+':
cell_value = int(values[0]) + int(values[2])
elif values[1].lower() == '-':
cell_value = int(values[0]) - int(values[2])
elif values[0].lower() == 'concat':
#cell_value = values[0] + ' ' + values[2]
cell_value = str(values[1]).join(values[2:])
print("cell_value : ", cell_value)
elif values[0].lower() == 'date':
cell_value = datetime.datetime.today() if values[1].lower() == 'today' else dateutil.parser.parse(values[1])
cell_value = generic.add_date(cell_value, int(values[2]), values[3])
cell_value = cell_value.date().strftime('%Y-%m-%d')
elif values[0].lower() == 'datetime':
cell_value = datetime.datetime.today() if values[1].lower() == 'today' else dateutil.parser.parse(values[1])
cell_value = generic.add_date(cell_value, int(values[2]), values[3])
cell_value = cell_value.strftime('%Y-%m-%d %H:%M:%S')
elif values[0].lower() == 'timestamp':
cell_value = datetime.datetime.today() if values[2].lower() == 'today' else dateutil.parser.parse(values[2])
cell_value = generic.add_date(cell_value, int(values[3]), values[4])
cell_value = cell_value.strftime('%Y-%m-%d %H:%M:%S')
cell_value = generic.get_timestamp(cell_value, int(values[1]))
else:
pass
print(cell_value)
else:
cell_value = values[0]
elif text.startswith(r'$') and text.endswith(r'$'):
if text in functions_list:
if text == "$SSN$":
#cell_value = Generic.pop_csv(file_path+"SSN.csv", datadict["DATASET"], csv_file_delimiter)
cell_value = generic.get_SSN('Sweden')
cell_value = re.sub(r'\..*', "", str(cell_value), flags=re.I)
print("SSN : " + cell_value)
if text == "$GS1$":
cell_value = generic.get_GS1_number('735999', '18')
cell_value = re.sub(r'\..*', "", str(cell_value), flags=re.I)
print("GS1 : " + cell_value)
if text == "$date$":
cell_value = datetime.datetime.today()
cell_value = cell_value.strftime('%Y-%m-%d')
if text == "$datetime$":
cell_value = datetime.datetime.today()
cell_value = cell_value.strftime('%Y-%m-%d %H:%M:%S')
elif text == '':
if key in data_dictionary[stritr].keys():
if data_dictionary[stritr][key] != '':
cell_value = data_dictionary[stritr][key]
else:
pass
datadict[key] = cell_value
logger.debug('<B>After update value in Global Data dictionary : </B>' + str(datadict), html=True)
return datadict
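# Illustrative inputs for the bracket mini-language handled above (the behavior
# of generic.add_date and the CSV layout are assumptions; values are hypothetical):
#   "[2,+,3]"              -> 5
#   "[concat,-,ABC,123]"   -> "ABC-123"
#   "[date,today,5,days]"  -> today's date shifted by 5 days, as 'YYYY-MM-DD'
#   "$date$"               -> today's date as 'YYYY-MM-DD'
#   "ITR1"                 -> value looked up in data_dictionary['ITR1'][key]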
def marge_test_dictionaries(itr, test_dictionary, gobal_dictionary):
"""
Author : <NAME>
Description : Merge updated values from the global dictionary into the test dictionary for the respective iteration
:param itr: iteration
:param test_dictionary: test data dictionary
:param gobal_dictionary: global data dictionary
:return: dictionary object
"""
for iteration in list(test_dictionary.keys()):
for key in list(gobal_dictionary.keys()):
if key in list(test_dictionary[iteration].keys()):
pass
#test_dictionary[iteration][key] = gobal_dictionary[key]
else:
if iteration == 'KEYS':
test_dictionary[iteration][key] = key
else:
test_dictionary[iteration][key] = ''
test_dictionary[itr].update(gobal_dictionary)
logger.debug('<B>After merging global dictionary into test data dictionary : </B>' + str(test_dictionary), html=True)
return test_dictionary
def test_dictionary_to_csv(dictdata, file_path, csv_file_delimiter):
"""
Author : <NAME>
Description : import test data dictionary to test data csv
:param dictdata: test data dictionary
:param file_path: imported to path
:param csv_file_delimiter:
:return:
"""
column_list = dictdata['KEYS'].keys()
df = pd.DataFrame.from_dict(dictdata)
df.index = column_list
df.drop(columns=['KEYS'])
print(df)
columns_header = list(dictdata.keys())
#columns_header.remove('KEYS')
df.to_csv(path_or_buf=file_path+'.csv', sep=csv_file_delimiter, index=False, columns=columns_header, index_label=True)
def generate_test_data(region, number, file_path, csv_file_delimiter="|"):
"""
Author : <NAME>
:param region:
:param number: number of fake records to generate
:param file_path:
:param csv_file_delimiter:
:return:
"""
print(region)
fake = Faker(str(region))
cust_data = {}
for i in range(0, int(number)):
cust_data[i] = {}
cust_data[i]['id'] = random.randrange(999, 10000)
cust_data[i]['first_name'] = fake.first_name()
cust_data[i]['last_name'] = fake.last_name()
cust_data[i]['name'] = cust_data[i]['first_name'] + ' ' + cust_data[i]['last_name']
cust_data[i]['ssn'] = fake.ssn()
cust_data[i]['dob'] = fake.date_of_birth(tzinfo=None, minimum_age=10, maximum_age=115).strftime("%x")
cust_data[i]['email'] = fake.email()
cust_data[i]['phone'] = fake.phone_number()
cust_data[i]['street_name'] = fake.street_name()
cust_data[i]['street_number'] = random.randrange(1, 99)
cust_data[i]['flat_number'] = random.randrange(1, 999)
cust_data[i]['floor_number'] = random.randrange(1, 20)
cust_data[i]['postcode'] = fake.postcode()
cust_data[i]['city'] = fake.city_name()
cust_data[i]['address'] = fake.address()
cust_data[i]['comment'] = fake.text()
cust_data[i]['latitude'] = str(fake.latitude())
cust_data[i]['longitude'] = str(fake.longitude())
logger.debug(cust_data)
df = pd.DataFrame.from_dict(cust_data, orient='index')
import forecaster as fc
import optimizer as opt
import trader as td
import datetime as dt
import utilities as util
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def calc_start_date(end_date=dt.datetime(2017,1,1), data_size=12):
return end_date-dt.timedelta(weeks=int(data_size * 52/12))
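# Example of the window arithmetic (assumed defaults): with data_size=12 months
# the offset is int(12 * 52 / 12) = 52 weeks, so the start date is one year
# before end_date; data_size=6 gives a 26-week look-back.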
def run_today(start_date=dt.datetime(2015,1,1), end_date=dt.datetime(2017,1,1), n_days=21, data_size=12,
myport=['AAPL', 'GOOG'], allocations=[0.5,0.5],
train_size=0.7, max_k=50, max_trade_size=0.1, gen_plot=False, verbose=False, savelogs=False):
"""
:param start_date: Beginning of time period
:param end_date: End of time period
:param n_days: Number of days into the future to predict the daily returns of a fund
:param data_size: The number of months of data to use in the machine learning model.
:param myport: The funds available in your portfolio
:param allocations: The percentage of your portfolio invested in the funds
:param train_size: The percentage of data used for training the ML model, remained used for testing.
:param max_k: Maximum number of neighbors used in kNN
:param max_trade_size: The maximum percentage of your portfolio permitted to be traded in any one transaction.
:param gen_plot: Boolean to see if you want to plot results
:param verbose: Boolean to print out information during execution of application.
:return:
"""
start_date = calc_start_date(end_date, data_size)#end_date - dt.timedelta(weeks=int(data_size * 52/12))
#print('start:', start_date, 'end:', end_date)
if verbose: print('-'*20 + '\nFORECAST\n' + '-'*20)
forecast = fc.forecast(start_date, end_date, symbols=myport, train_size=train_size,
n_days=n_days, max_k=max_k, gen_plot=gen_plot, verbose=verbose, savelogs=savelogs)
if verbose: print('\n'+'-'*20 + '\nOPTIMIZE\n' + '-'*20)
target_allocations = opt.optimize_return(forecast, myport, allocations, gen_plot=gen_plot, verbose=verbose, savelogs=savelogs)
if verbose: print('\n' + '-'*20 + '\nORDERS\n' + '-'*20)
trade_date = forecast.index.max()
orders = td.create_orders(myport, allocations, target_allocations, trade_date=trade_date,max_trade_size=max_trade_size, verbose=verbose, savelogs=savelogs)
if verbose: print(orders)
new_allocations = allocations.copy()
for i in range(orders.shape[0]):
# fix this code so that the correct allocations are updated!
index = myport.index(orders.loc[i, 'Symbol'])
#symbol = orders.loc[i, 'Symbol']
if orders.loc[i, 'Action'] == 'SELL':
new_allocations[index] -= orders.loc[i, 'Quantity']
else:
new_allocations[index] += orders.loc[i, 'Quantity']
adr_current, vol_current, sr_current, pv_current = util.compute_returns(forecast, allocations=allocations)
adr_target, vol_target, sr_target, pv_target = util.compute_returns(forecast, allocations=target_allocations)
adr_new, vol_new, sr_new, pv_new = util.compute_returns(forecast, allocations=new_allocations)
if verbose:
print("Portfolios:", "Current", "Target","New")
print("Daily return: %.5f %.5f %.5f" % (adr_current, adr_target, adr_new))
print("Daily Risk: %.5f %.5f %.5f" % (vol_current, vol_target, vol_new))
print("Sharpe Ratio: %.5f %.5f %.5f" % (sr_current, sr_target, sr_new))
print("Return vs Risk: %.5f %.5f %.5f" % (adr_current/vol_current, adr_target/vol_target, adr_new/vol_new))
print("\nALLOCATIONS\n" + "-" * 40)
print("Symbol", "Current", "Target", 'New')
for i, symbol in enumerate(myport):
print("%s %.3f %.3f %.3f" %
(symbol, allocations[i], target_allocations[i], new_allocations[i]))
# Compare daily portfolio value with SPY using a normalized plot
if gen_plot:
fig, ax = plt.subplots()
ax.scatter(vol_current, adr_current, c='green', s=15, alpha=0.5) # Current portfolio
ax.scatter(vol_target, adr_target, c='red', s=15, alpha=0.5) # ef
ax.scatter(vol_new, adr_new, c='black', s=25, alpha=0.75) # ef
ax.set_xlabel('St. Dev. Daily Returns')
ax.set_ylabel('Mean Daily Returns')
#ax.set_xlim(min(vol)/1.5, max(vol)*1.5)
#ax.set_ylim(min(adr)/1.5, max(adr)*1.5)
ax.grid()
ax.grid(linestyle=':')
fig.tight_layout()
plt.show()
# add code to plot here
df_temp = pd.concat([pv_current, pv_target, pv_new], keys=['Current', 'Target', 'New'], axis=1)
df_temp = df_temp / df_temp.iloc[0, :]
util.plot_data(df_temp, 'Forecasted Daily portfolio value and SPY', 'Date-21', 'Normalized Price')
if False: # meh was going to plot portfolio values for the last year but trying something else now
prior_prices = util.load_data(myport, start_date, end_date)
prior_prices.fillna(method='ffill', inplace=True)
prior_prices.fillna(method='bfill', inplace=True)
#prices_SPY = prior_prices['SPY'] # SPY prices, for benchmark comparison
prior_prices = prior_prices[myport] # prices of portfolio symbols
forecast_prices = forecast * prior_prices
time_span = pd.date_range(forecast.index.min(), end_date + dt.timedelta(days=n_days*2))
forecast_prices = forecast_prices.reindex(time_span)
forecast_prices = forecast_prices.shift(periods=n_days*2)
forecast_prices = forecast_prices.dropna()
forecast_prices = pd.concat([prior_prices, forecast_prices], axis=0)
adr_current, vol_current, sr_current, pv_current = util.compute_returns(forecast_prices, allocations=allocations)
adr_target, vol_target, sr_target, pv_target = util.compute_returns(forecast_prices, allocations=target_allocations)
adr_new, vol_new, sr_new, pv_new = util.compute_returns(forecast_prices, allocations=new_allocations)
df_temp = pd.concat([pv_current, pv_target, pv_new], keys=['Current', 'Target', 'New'], axis=1)
df_temp = df_temp / df_temp.iloc[0, :]
util.plot_data(df_temp, 'Daily portfolio value and SPY', 'Date', 'Normalized Price')
return new_allocations, trade_date
def test_experiment_one(n_days=21, data_size=12, train_size=0.7, max_k=50, max_trade_size=0.1,
years_to_go_back=2, initial_investment=10000, gen_plot=False, verbose=False, savelogs=False):
today = dt.date.today()
yr = today.year - years_to_go_back
mo = today.month - 1 # Just temporary, take out 1 when data download is fixed.
da = today.day - 1
start_date = dt.datetime(yr, mo, da)
end_date = dt.datetime(yr + 1, mo, da)
adr = [None] * 12
vol = [None] * 12
sr = [None] * 12
myport = ['AAPL', 'GLD']
myalloc = [0.5,0.5]
# Portfolio values for Holding the Same Allocation (conservative case)
actual_prices = util.load_data(myport, start_date, end_date)
actual_prices.fillna(method='ffill', inplace=True)
actual_prices.fillna(method='bfill', inplace=True)
prices_SPY = actual_prices['SPY']
actual_prices = actual_prices[myport]
adr_cons, vol_cons, sharpe_cons, pv_cons = util.compute_returns(actual_prices, myalloc, sf=252.0, rfr=0.0)
# Portfolio values with monthly optimization using hindsight (best possible case)
# Portfolio values for Machine Learner
ml_allocs = []
ml_trade_dates = []
for i in range(int(252/n_days)):
temp = round(i*52*n_days/252)
test_date = start_date + dt.timedelta(weeks=round(i*52*n_days/252))
#print(i, temp, test_date)
if verbose: print(('EXPERIMENT %i - %s') % (i, str(test_date.strftime("%m/%d/%Y"))))
myalloc, trade_date = run_today(end_date=test_date, n_days=n_days, data_size=data_size,
myport=myport, allocations=myalloc,
train_size=train_size, max_k=max_k,
max_trade_size=max_trade_size, gen_plot=gen_plot, verbose=verbose, savelogs=savelogs)
ml_allocs.append(myalloc)
ml_trade_dates.append(trade_date)
ml_allocations = pd.DataFrame(data=ml_allocs, index=ml_trade_dates, columns=myport)
import pandas as pd
import numpy as np
ht_fail = pd.read_csv('/content/sample_data/heart failur classification dataset.csv')
from functools import reduce
import re
import numpy as np
import pandas as pd
from avaml import _NONE
from avaml.aggregatedata.__init__ import DatasetMissingLabel
from avaml.score.overlap import calc_overlap
__author__ = 'arwi'
VECTOR_WETNESS_LOOSE = {
_NONE: (0, 0),
"new-loose": (0, 1),
"wet-loose": (1, 1),
"new-slab": (0, 0.4),
"drift-slab": (0, 0.2),
"pwl-slab": (0, 0),
"wet-slab": (1, 0),
"glide": (0.8, 0),
}
VECTOR_FREQ = {
"dsize": {
_NONE: 0,
'0': 0,
'1': 0.2,
'2': 0.4,
'3': 0.6,
'4': 0.8,
'5': 1,
},
"dist": {
_NONE: 0,
'0': 0,
'1': 0.25,
'2': 0.5,
'3': 0.75,
'4': 1,
},
"trig": {
_NONE: 0,
'0': 0,
'10': 1 / 3,
'21': 2 / 3,
'22': 1,
},
"prob": {
_NONE: 0,
'0': 0,
'2': 1 / 3,
'3': 2 / 3,
'5': 1,
},
}
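# Worked example of how these maps combine into a single frequency score
# (hypothetical class values; see the reduce() in Score.to_vec below):
#   dsize '3' (0.6) * dist '2' (0.5) * trig '10' (1/3) * prob '3' (2/3) ~= 0.067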
class Score:
def __init__(self, labeled_data):
def to_vec(df):
level_2 = ["wet", "loose", "freq", "lev_max", "lev_min", "lev_fill", "aspect"]
columns = pd.MultiIndex.from_product([["global"], ["danger_level", "emergency_warning"]]).append(
pd.MultiIndex.from_product([[f"problem_{n}" for n in range(1, 4)], level_2])
)
vectors = pd.DataFrame(index=df.index, columns=columns)
vectors[("global", "danger_level")] = df[("CLASS", _NONE, "danger_level")].astype(np.int) / 5
vectors[("global", "emergency_warning")] = (
df[("CLASS", _NONE, "emergency_warning")] == "Naturlig utløste skred"
).astype(np.int)
for idx, row in df.iterrows():
for prob_n in [f"problem_{n}" for n in range(1, 4)]:
problem = row["CLASS", _NONE, prob_n]
if problem == _NONE:
vectors.loc[idx, prob_n] = [0, 0, 0, 0, 0, 2, "00000000"]
else:
p_class = row["CLASS", problem]
p_real = row["REAL", problem]
wet = VECTOR_WETNESS_LOOSE[problem][0]
loose = VECTOR_WETNESS_LOOSE[problem][1]
freq = reduce(lambda x, y: x * VECTOR_FREQ[y][p_class[y]], VECTOR_FREQ.keys(), 1)
lev_max = float(p_real["lev_max"]) if p_real["lev_max"] else 0.0
lev_min = float(p_real["lev_min"]) if p_real["lev_min"] else 0.0
lev_fill = int(p_class["lev_fill"]) if p_class["lev_fill"] else 0
aspect = row["MULTI", problem, "aspect"]
vectors.loc[idx, prob_n] = [wet, loose, freq, lev_max, lev_min, lev_fill, aspect]
return vectors
if labeled_data.label is None or labeled_data.pred is None:
raise DatasetMissingLabel()
self.label_vectors = to_vec(labeled_data.label)
self.pred_vectors = to_vec(labeled_data.pred)
def calc(self):
weights = np.array([0.20535988, 0.0949475, 1.])
diff_cols = [not re.match(r"^(lev_)|(aspect)", col) for col in self.label_vectors.columns.get_level_values(1)]
diff = self.pred_vectors.loc[:, diff_cols] - self.label_vectors.loc[:, diff_cols]
p_score_cols = pd.MultiIndex.from_tuples([("global", "problem_score")]).append(
pd.MultiIndex.from_product([[f"problem_{n}" for n in range(1, 4)], ["spatial_diff"]])
)
p_score = pd.DataFrame(index=diff.index, columns=p_score_cols)
for idx, series in self.label_vectors.iterrows():
problem_score, spatial_diffs = dist(series, self.pred_vectors.loc[idx])
p_score.loc[idx] = np.array([problem_score] + spatial_diffs)
maxdist = np.power(weights, 2).sum()
score = np.power(pd.concat([diff.iloc[:, :2], p_score[[("global", "problem_score")]]], axis=1)
import numpy as np
from pandas import Period, Series, date_range, period_range
import pandas._testing as tm
class TestCombineFirst:
def test_combine_first_period_datetime(self):
# GH#3367
didx = date_range(start="1950-01-31", end="1950-07-31", freq="M")
pidx = period_range(start=Period("1950-1"), end=Period("1950-7"), freq="M")
# check to be consistent with DatetimeIndex
for idx in [didx, pidx]:
a = Series([1, np.nan, np.nan, 4, 5, np.nan, 7], index=idx)
import pytest
from pandas import (
Index,
Series,
date_range,
)
import pandas._testing as tm
class TestSeriesDelItem:
def test_delitem(self):
# GH#5542
# should delete the item inplace
s = Series(range(5))
del s[0]
expected = Series(range(1, 5), index=range(1, 5))
tm.assert_series_equal(s, expected)
del s[1]
expected = Series(range(2, 5), index=range(2, 5))
tm.assert_series_equal(s, expected)
# only 1 left, del, add, del
s = Series(1)
del s[0]
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="int64")))
s[0] = 1
tm.assert_series_equal(s, Series(1))
import datetime
import databricks.koalas as ks
import numpy as np
# These function can return a Column Expression or a list of columns expression
# Must return None if the data type can not be handle
import pandas as pd
from pyspark.sql import functions as F
from optimus.engines.base.commons.functions import word_tokenize
from optimus.engines.base.dataframe.functions import DataFrameBaseFunctions
from optimus.engines.base.pandas.functions import PandasBaseFunctions
from optimus.helpers.core import val_to_list, one_list_to_val
from optimus.helpers.raiseit import RaiseIt
class SparkFunctions(PandasBaseFunctions, DataFrameBaseFunctions):
_engine = ks
def _to_float(self, series):
"""
Converts a series values to floats
"""
return series.astype("float")
def _to_integer(self, series, default=0):
"""
Converts a series values to integers
"""
return series.astype("integer")
def _to_string(self, series):
"""
Converts a series values to strings
"""
return series.astype("str")
def _to_boolean(self, series):
"""
Converts a series values to bool
"""
return series.astype("bool")
def hist(col_name, df, buckets, min_max=None, dtype=None):
"""
Create a columns expression to calculate a column histogram
:param col_name:
:param df:
:param buckets:
:param min_max: Min and max value necessary to calculate the buckets
:param dtype: Column datatype to calculate the related histogram. Int, String and Dates return different histograms
:return:
"""
PYSPARK_NUMERIC_TYPES = ["byte", "short", "big", "int", "double", "float"]
def is_column_a(df, column, dtypes):
"""
Check if column match a list of data types
:param df: dataframe
:param column: column to be compared with
:param dtypes: types to be checked
:return:
"""
column = val_to_list(column)
if len(column) > 1:
RaiseIt.length_error(column, 1)
data_type = tuple(val_to_list(parse_spark_dtypes(dtypes)))
column = one_list_to_val(column)
# Filter columns by data type
return isinstance(df.schema[column].dataType, data_type)
def create_exprs(_input_col, _buckets, _func):
def count_exprs(_exprs):
return F.sum(F.when(_exprs, 1).otherwise(0))
_exprs = []
for i, b in enumerate(_buckets):
lower = b["lower"]
upper = b["upper"]
if is_numeric(lower):
lower = round(lower, 2)
if is_numeric(upper):
upper = round(upper, 2)
if len(_buckets) == 1:
count = count_exprs(
(_func(_input_col) == lower))
else:
if i == len(_buckets):
count = count_exprs(
(_func(_input_col) > lower) & (_func(_input_col) <= upper))
else:
count = count_exprs(
(_func(_input_col) >= lower) & (_func(_input_col) < upper))
info = F.create_map(F.lit("count"), count.cast("int"), F.lit("lower"), F.lit(lower), F.lit("upper"),
F.lit(upper)).alias(
"hist_agg" + "_" + _input_col + "_" + str(b["bucket"]))
_exprs.append(info)
_exprs = F.array(*_exprs).alias("hist" + _input_col)
return _exprs
def hist_numeric(_min_max, _buckets):
if _min_max is None:
_min_max = df.agg(F.min(col_name).alias("min"), F.max(col_name).alias("max")).to_dict()[0]
if _min_max["min"] is not None and _min_max["max"] is not None:
_buckets = create_buckets(_min_max["min"], _min_max["max"], _buckets)
_exprs = create_exprs(col_name, _buckets, F.col)
else:
_exprs = None
return _exprs
def hist_string(_buckets):
_buckets = create_buckets(0, 50, _buckets)
func = F.length
return create_exprs(col_name, _buckets, func)
def hist_date():
now = datetime.datetime.now()
current_year = now.year
oldest_year = 1950
# Year
_buckets = create_buckets(oldest_year, current_year, current_year - oldest_year)
func = F.year
year = create_exprs(col_name, _buckets, func)
# Month
_buckets = create_buckets(1, 12, 11)
func = F.month
month = create_exprs(col_name, _buckets, func)
# Day
_buckets = create_buckets(1, 31, 31)
func = F.dayofweek
day = create_exprs(col_name, _buckets, func)
# Hour
_buckets = create_buckets(0, 23, 23)
func = F.hour
hour = create_exprs(col_name, _buckets, func)
# Min
_buckets = create_buckets(0, 60, 60)
func = F.minute
minutes = create_exprs(col_name, _buckets, func)
# Second
_buckets = create_buckets(0, 60, 60)
func = F.second
second = create_exprs(col_name, _buckets, func)
exprs = F.create_map(F.lit("years"), year, F.lit("months"), month, F.lit("weekdays"), day,
F.lit("hours"), hour, F.lit("minutes"), minutes, F.lit("seconds"), second)
return exprs
if dtype is not None:
col_dtype = dtype[col_name]["dtype"]
if col_dtype == "int" or col_dtype == "decimal":
exprs = hist_numeric(min_max, buckets)
elif col_dtype == "string":
exprs = hist_string(buckets)
elif col_dtype == "date":
exprs = hist_date()
else:
exprs = None
else:
if is_column_a(df, col_name, PYSPARK_NUMERIC_TYPES):
exprs = hist_numeric(min_max, buckets)
elif is_column_a(df, col_name, "str"):
exprs = hist_string(buckets)
elif is_column_a(df, col_name, "date") or is_column_a(df, col_name, "timestamp"):
exprs = hist_date()
else:
exprs = None
return exprs
def create_exprs(_input_col, _buckets, _func):
def count_exprs(_exprs):
return F.sum(F.when(_exprs, 1).otherwise(0))
_exprs = []
for i, b in enumerate(_buckets):
lower = b["lower"]
upper = b["upper"]
if is_numeric(lower):
lower = round(lower, 2)
if is_numeric(upper):
upper = round(upper, 2)
if len(_buckets) == 1:
count = count_exprs(
(_func(_input_col) == lower))
else:
if i == len(_buckets):
count = count_exprs(
(_func(_input_col) > lower) & (_func(_input_col) <= upper))
else:
count = count_exprs(
(_func(_input_col) >= lower) & (_func(_input_col) < upper))
info = F.create_map(F.lit("count"), count.cast("int"), F.lit("lower"), F.lit(lower), F.lit("upper"),
F.lit(upper)).alias(
"hist_agg" + "_" + _input_col + "_" + str(b["bucket"]))
_exprs.append(info)
_exprs = F.array(*_exprs).alias("hist" + _input_col)
return _exprs
@staticmethod
def dask_to_compatible(dfd):
from optimus.helpers.converter import dask_dataframe_to_pandas
return ks.from_pandas(dask_dataframe_to_pandas(dfd))
@staticmethod
def new_df(*args, **kwargs):
return ks.from_pandas(pd.DataFrame(*args, **kwargs))
@staticmethod
def df_concat(df_list):
return ks.concat(df_list, axis=0, ignore_index=True)
def word_tokenize(self, series):
return self.to_string(series).map(word_tokenize, na_action=None)
def count_zeros(self, series, *args):
return int((self.to_float(series).values == 0).sum())
def kurtosis(self, series):
return self.to_float(series).kurtosis()
def skew(self, series):
return self.to_float(series).skew()
def exp(self, series):
return np.exp(self.to_float(series))
def sqrt(self, series):
return np.sqrt(self.to_float(series))
def reciprocal(self, series):
return np.reciprocal(self.to_float(series))
def radians(self, series):
return np.radians(self.to_float(series))
def degrees(self, series):
return np.degrees(self.to_float(series))
def ln(self, series):
return np.log(self.to_float(series))
def log(self, series, base=10):
return np.log(self.to_float(series)) / np.log(base)
def sin(self, series):
return np.sin(self.to_float(series))
def cos(self, series):
return np.cos(self.to_float(series))
def tan(self, series):
return np.tan(self.to_float(series))
def asin(self, series):
return np.arcsin(self.to_float(series))
def acos(self, series):
return np.arccos(self.to_float(series))
def atan(self, series):
return np.arctan(self.to_float(series))
def sinh(self, series):
return np.sinh(self.to_float(series))
def cosh(self, series):
return np.cosh(self.to_float(series))
def tanh(self, series):
return np.tanh(self.to_float(series))
def asinh(self, series):
return np.arcsinh(self.to_float(series))
def acosh(self, series):
return np.arccosh(self.to_float(series))
def atanh(self, series):
return np.arctanh(self.to_float(series))
def floor(self, series):
return np.floor(self.to_float(series))
def ceil(self, series):
return np.ceil(self.to_float(series))
def normalize_chars(self, series):
return series.str.normalize("NFKD").str.encode('ascii', errors='ignore').str.decode('utf8')
def format_date(self, series, current_format=None, output_format=None):
return ks.to_datetime(series, format=current_format,
errors="coerce").dt.strftime(output_format).reset_index(drop=True)
def time_between(self, series, value=None, date_format=None):
value_date_format = date_format
if is_list_or_tuple(date_format) and len(date_format) == 2:
date_format, value_date_format = date_format
if is_list_or_tuple(value) and len(value) == 2:
value, value_date_format = value
date = pd.to_datetime(series, format=date_format, errors="coerce")
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score, accuracy_score
from omegaconf import OmegaConf
from torch.utils.data import Dataset, DataLoader
from utils import *
conf = """
base:
api_key_path: "token/token.json"
train_path: "../data/otto-train.csv"
seed: 67
dataset:
img_size: 20
features: 93 # number of features
val_size: 300 # number of samples used for validation
target: 0 # label class
model:
timeout: 3000 # computation time in ms
# the training data is (batch_size * n_iter) samples
batch_size: 30 # batch size
n_iter: 5 # number of loops
l: 3 # regularization term
each_weight: 2.5 # weight coefficient
length_weight: 5 # number of weight layers
multiprocessing: true
"""
target_dict = {
"Class_1": 0,
"Class_2": 1,
"Class_3": 2,
"Class_4": 3,
"Class_5": 4,
"Class_6": 5,
"Class_7": 6,
"Class_8": 7,
"Class_9": 8
}
cfg = OmegaConf.create(conf)
init_client(cfg)
# PyTorch-style dataset
class MyDataset(Dataset):
def __init__(self, data, label):
self.label = label
self.data = np.apply_along_axis(minmax, 1, data)
self.label = np.where(self.label == cfg.dataset.target, 1, 0)
self.data[self.data == 0] = -1
def __len__(self):
return self.data.shape[0]
def __getitem__(self, idx):
return self.data[idx, :], self.label[idx]
def get_ds(n, seed):
true_ = dataset.loc[dataset.iloc[:, -1] == cfg.dataset.target, :].sample(n // 2)
false_ = dataset.loc[dataset.iloc[:, -1] != cfg.dataset.target, :].sample(n // 2)
return pd.concat(
[
true_,
false_
]
).sample(
frac=1,
random_state=seed
).values
dataset = pd.read_csv(cfg.base.train_path)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate
import pandas as pd
import pdb
class Evaluator:
def __init__(self, gold_standard_file = None, sep='\t', interaction_label='regulator-target', node_list=None, subnet_dict=None):
if (gold_standard_file is None) and (subnet_dict is not None):
self.gs_flat = pd.Series(subnet_dict['true_edges'])
self.full_list = pd.Series(subnet_dict['edges'])
elif gold_standard_file is not None:
self.gs_file = gold_standard_file
self.gs_data = pd.read_csv(gold_standard_file, sep=sep, header=None)
self.gs_data.columns = ['regulator','target','exists']
self.gs_data['regulator-target'] = list(zip(self.gs_data.regulator, self.gs_data.target))
self.interaction_label = interaction_label
self.gs_flat = self.gs_data[self.gs_data['exists'] > 0]['regulator-target']
self.gs_neg = self.gs_data[self.gs_data['exists'] == 0]['regulator-target']
#ecoli has a unique gold standard file
if 'ecoli' in self.gs_file:
self.regulators = ["G"+str(x) for x in range(1,335)]
self.targets = ["G"+str(x) for x in range(1,4512)]
self.full_list = tuple(map(tuple,self.possible_edges(np.array(self.regulators),np.array(self.targets))))
elif 'omranian' in self.gs_file:
with open('../../data/invitro/omranian_parsed_tf_list.tsv', 'r') as f:
self.regulators = f.read().splitlines()
with open('../../data/invitro/omranian_all_genes_list.tsv', 'r') as f:
self.targets = f.read().splitlines()
self.full_list = tuple(map(tuple, self.possible_edges(np.array(self.regulators), np.array(self.targets))))
elif 'dream5' in self.gs_file:
with open('../../data/dream5/insilico_transcription_factors.tsv', 'r') as f:
self.regulators = f.read().splitlines()
fp = '../../data/dream5/insilico_timeseries.tsv'
df = pd.read_csv(fp, sep='\t')
geneids = df.columns.tolist()
geneids.pop(0)
self.targets = geneids
self.full_list = tuple(map(tuple, self.possible_edges(np.array(self.regulators), np.array(self.targets))))
elif node_list:
all_regulators = np.array(list(set(node_list)))
self.full_list = tuple(map(tuple,self.possible_edges(all_regulators,all_regulators)))
else:
#more robust version of defining the full list
all_regulators = self.gs_data['regulator'].unique().tolist()
all_targets = self.gs_data['target'].unique().tolist()
all_regulators.extend(all_targets)
all_regulators = np.array(list(set(all_regulators)))
self.full_list = tuple(map(tuple,self.possible_edges(all_regulators,
all_regulators)))
#remove self edges
self.full_list = [ x for x in self.full_list if x[0] != x[1] ]
self.full_list = pd.Series(self.full_list)
def possible_edges(self,parents, children):
"""
Create a list of all the possible edges between parents and children
:param parents: array
labels for parents
:param children: array
labels for children
:return: array, length = parents * children
array of parent, child combinations for all possible edges
"""
parent_index = range(len(parents))
child_index = range(len(children))
a, b = np.meshgrid(parent_index, child_index)
parent_list = parents[a.flatten()]
child_list = children[b.flatten()]
possible_edge_list = np.array(list(zip(parent_list, child_list)))
return possible_edge_list
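# Illustrative call (hypothetical labels): two regulators crossed with two
# targets yield every ordered pair, self-edges included (those are filtered
# out later in __init__):
#   possible_edges(np.array(['G1', 'G2']), np.array(['G1', 'G2']))
#   -> [['G1', 'G1'], ['G2', 'G1'], ['G1', 'G2'], ['G2', 'G2']]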
def create_link_list(self,df, w):
parent_names = df.index.values
child_names = df.columns.values
edges = self.possible_edges(parent_names, child_names)
parents = edges[:, 0]
children = edges[:, 1]
directed_edges = df.values.flatten()
all_edges = np.abs(directed_edges)
ll_array = [parents, children, list(zip(parents, children)), directed_edges, all_edges, w]
link_list = pd.DataFrame(ll_array).transpose()
link_list.columns = ['Parent', 'Child', 'Edge', 'Directed_Edge', 'Edge_Exists', 'W']
#link_list.sort(columns='Edge_Exists', ascending=False, inplace=True)
return link_list
def calc_pr(self,pred,sort_on = 'exists'):
tpr = []
fpr = []
pred = pred.drop(pred.index[[i for i,v in enumerate(pred['regulator-target']) if v[0]==v[1]]], axis=0)
pred['tp']=pred['regulator-target'].isin(self.gs_flat)
pred['fp']=~pred['regulator-target'].isin(self.gs_flat)
### find total number of edges
negative_edges = self.full_list[~self.full_list.isin(self.gs_flat)]
total_negative = len(negative_edges)
total_positive = len(self.gs_flat)
### generate cumulative sum of tp, fp, tn, fn
pred['tp_cs'] = pred['tp'].cumsum()
pred['fp_cs'] = pred['fp'].cumsum()
pred['fn_cs'] = total_positive - pred['tp_cs']
pred['tn_cs'] = total_negative - pred['fp_cs']
pred['recall']=pred['tp_cs']/(pred['tp_cs']+pred['fn_cs'])
pred['precision']=pred['tp_cs']/(pred['tp_cs']+pred['fp_cs'])
aupr = integrate.cumtrapz(x=pred['recall'], y=pred['precision']).tolist()
aupr.insert(0,0)
pred['aupr'] = aupr
return pred['precision'], pred['recall'], pred['aupr']
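# Sketch of the cumulative-sum bookkeeping above (hypothetical ranking scored
# against 2 true edges):
#   ranked edges: (A,B) hit, (C,A) miss, (B,C) hit
#   tp_cs = [1, 1, 2], fp_cs = [0, 1, 1]
#   precision = [1.0, 0.5, 0.667], recall = [0.5, 0.5, 1.0]
# and the AUPR is the trapezoidal integral of precision over recall.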
def calc_pr_old(self, pred, sort_on = 'exists'):
# True Positive Rate (TPR) = TP/(TP+FN)
# False Positive Rate (FPR) = FP/(FP+TN)
#initialize counts
#todo: remove below, it is unnecessary
counts = {}
counts['tp'] = 0.0
counts['fp'] = 0.0
counts['fn'] = 0.0
counts['tn'] = 0.0
precision = []
recall = []
current_list = []
#current_list is the list at a certain index.
#at each iteration, add the edge to the current_list.
#evaluate the current list's precision and recall value
#it is assumed that the edges are already sorted in descending rank order
for edge in pred['regulator-target']:
current_list.append(edge)
counts = self._evaluate(current_list)
if counts['tp'] ==0 and counts['fn'] ==0:
recall.append(0.0)
else:
recall.append(counts['tp']/(counts['tp']+counts['fn']))
if counts['fp'] ==0 and counts['tp'] ==0:
precision.append(0.0)
else:
precision.append(counts['tp']/(counts['tp']+counts['fp']))
aupr = integrate.cumtrapz(y=precision, x=recall)
return precision, recall, aupr
def _evaluate(self, current_list):
""" evaluate the list using sets hooray, packs it up in dict """
gold_standard = set(self.gs_flat)
prediction = set(current_list)
full = set(self.full_list)
tp = len(prediction.intersection(gold_standard))
#both in prediction and gold_standard
fp = len(prediction.difference(gold_standard))
#in prediction but not in gold standard
pred_full = full.difference(prediction) # 'negatives' or in full but not pred
gold_full = full.difference(gold_standard) # in full but not in gold
tn = len(pred_full.intersection(gold_full))
fn = len(pred_full.difference(gold_full))
#compare pred - full negatives, and gold - full negatives
#true negatives is the intersection between gold_full and pred full
#false negatives is the number of things in pred full that are not in gold full
results = {}
results['tn'] = float(tn)
results['tp'] = float(tp)
results['fn'] = float(fn)
results['fp'] = float(fp)
return(results)
def calc_roc(self, pred):
"""much faster calculations for AUROC"""
tp = 0.0
fp = 0.0
tpr = []
fpr = []
pred = pred.drop(pred.index[[i for i,v in enumerate(pred['regulator-target']) if v[0]==v[1]]], axis=0)
pred['tp']=pred['regulator-target'].isin(self.gs_flat)
pred['fp']=~pred['regulator-target'].isin(self.gs_flat)
### find total number of edges
negative_edges = self.full_list[~self.full_list.isin(self.gs_flat)]
total_negative = len(negative_edges)
total_positive = len(self.gs_flat)
### generate cumulative sum of tp, fp, tn, fn
pred['tp_cs'] = pred['tp'].cumsum()
pred['fp_cs'] = pred['fp'].cumsum()
pred['fn_cs'] = total_positive - pred['tp_cs']
pred['tn_cs'] = total_negative - pred['fp_cs']
pred['tpr']=pred['tp_cs']/total_positive
pred['fpr']=pred['fp_cs']/total_negative
auroc = integrate.cumtrapz(x=pred['fpr'], y=pred['tpr']).tolist()
auroc.insert(0,0)
pred['auroc'] = auroc
return pred['tpr'], pred['fpr'], pred['auroc']
def calc_roc_old(self, pred):
# True Positive Rate (TPR) = TP/(TP+FN)
# False Positive Rate (FPR) = FP/(FP+TN)
tp = 0.0
fp = 0.0
tpr = []
fpr = []
current_list = []
for edge in pred['regulator-target']:
current_list.append(edge)
counts = self._evaluate(current_list)
total_p = counts['tp']+ counts['fn']
total_n = counts['fp']+ counts['tn']
if total_n == 0:
fpr.append(0.0)
else:
fpr.append(counts['fp']/total_n)
if total_p == 0:
tpr.append(0.0)
else:
tpr.append(counts['tp']/total_p)
auroc = integrate.cumtrapz(x=fpr, y=tpr)
return tpr, fpr, auroc
if __name__ == '__main__':
xls = pd.ExcelFile('../../goldbetter_model/adjacency_matrix.xlsx')
import datetime
import functools
import os
from urllib.parse import urljoin
import arcgis
import geopandas
import numpy
import pandas
import requests
from airflow import DAG
from airflow.hooks.base_hook import BaseHook
from airflow.models import Variable
from airflow.operators.python_operator import PythonOperator
from airflow.utils.email import send_email
from arcgis.gis import GIS
API_BASE_URL = "https://api2.gethelp.com/v1/"
FACILITIES_ID = "dd618cab800549358bac01bf218406e4"
STATS_ID = "9db2e26c98134fae9a6f5c154a1e9ac9"
TIMESERIES_ID = "bd17014f8a954681be8c383acdb6c808"
COUNCIL_DISTRICTS = (
"https://opendata.arcgis.com/datasets/"
"76104f230e384f38871eb3c4782f903d_13.geojson"
)
def download_council_districts():
r = requests.get(COUNCIL_DISTRICTS)
fname = "/tmp/council-districts.geojson"
with open(fname, "wb") as f:
f.write(r.content)
return fname
def coerce_integer(df):
"""
Loop through the columns of a df, if it is numeric,
convert it to integer and fill nans with zeros.
This is somewhat heavy-handed in an attempt to force
Esri to recognize sparse columns as integers.
"""
# Numeric columns to not coerce to integer
EXCEPT = ["latitude", "longitude", "zipCode"]
def numeric_column_to_int(series):
return (
series.fillna(0).astype(int)
if pandas.api.types.is_numeric_dtype(series) and series.name not in EXCEPT
else series
)
return df.transform(numeric_column_to_int, axis=0)
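# Sketch of the coercion (hypothetical frame): a sparse numeric column such as
# 'totalBeds' with values [12.0, NaN] becomes [12, 0], while columns named in
# EXCEPT ('latitude', 'longitude', 'zipCode') are passed through unchanged.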
def upload_to_esri(df, layer_id, filename="/tmp/df.csv"):
"""
A quick helper function to upload a data frame
to ESRI as a featurelayer backed CSV
recommend: no geometries, lat/long columns
remember ESRI is UTC only.
"""
df.to_csv(filename, index=False)
# Login to ArcGIS
arcconnection = BaseHook.get_connection("arcgis")
arcuser = arcconnection.login
arcpassword = arcconnection.password
gis = GIS("http://lahub.maps.arcgis.com", username=arcuser, password=arcpassword)
gis_item = gis.content.get(layer_id)
gis_layer_collection = arcgis.features.FeatureLayerCollection.fromitem(gis_item)
gis_layer_collection.manager.overwrite(filename)
os.remove(filename)
return True
def make_get_help_request(api_path, token, params={}, paginated=True):
"""
Makes an API request to the GetHelp platform.
Also handles depagination of long responses.
Parameters
==========
api_path: string
The path to query
token: string
The OAuth bearer token
params: dict
Any additional query parameters to pass
paginated: boolean
Whether the response is expected to be a list of paginated results
with a "content" field. In this case, the function will depaginate
the results. If false, it will return the raw JSON.
Returns
=======
The depaginated JSON response in the "content" field, or the raw JSON response.
"""
endpoint = urljoin(API_BASE_URL, api_path)
if paginated:
content = []
page = 0
while True:
r = requests.get(
endpoint,
headers={"Authorization": f"Bearer {token}"},
params=dict(page=page, **params),
)
res = r.json()
content = content + res["content"]
if res["last"] is True:
break
else:
page = page + 1
return content
else:
r = requests.get(
endpoint, headers={"Authorization": f"Bearer {token}"}, params=params,
)
return r.json()
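# Usage sketch (the path is taken from the calls below; the token value is an
# assumption):
#   token = Variable.get("GETHELP_OAUTH_PASSWORD")
#   facilities = make_get_help_request("facility-groups/1/facilities", token)
# The paginated branch keeps requesting page=0, 1, 2, ... and concatenates each
# page's "content" list until the response reports last=True.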
def get_facilities():
"""
Get the current facilties and their status.
Returns
=======
A dataframe with the current facilities.
"""
TOKEN = Variable.get("GETHELP_OAUTH_PASSWORD")
res = make_get_help_request("facility-groups/1/facilities", TOKEN)
df = pandas.io.json.json_normalize(res)
df = pandas.concat(
[df, df.apply(lambda x: get_client_stats(x["id"]), axis=1)], axis=1,
)
df = pandas.concat(
[df, df.apply(lambda x: get_facility_program_status(x["id"]), axis=1)], axis=1,
)
council_districts = geopandas.read_file(
download_council_districts(), driver="GeoJSON"
)[["geometry", "District"]]
df = geopandas.GeoDataFrame(
df,
geometry=geopandas.points_from_xy(df.longitude, df.latitude),
crs={"init": "epsg:4326"},
)
df = df.assign(
district=df.apply(
lambda x: council_districts[council_districts.contains(x.geometry)]
.iloc[0]
.District,
axis=1,
)
).drop(columns=["geometry"])
return df
def get_client_stats(facility_id):
"""
Given a facility ID, get the current client status.
Parameters
==========
facility_id: int
The facility ID
Returns
=======
A pandas.Series with the client statistics for the facility.
"""
TOKEN = Variable.get("GETHELP_OAUTH_PASSWORD")
res = make_get_help_request(
f"facilities/{facility_id}/client-statistics", TOKEN, paginated=False,
)
return (
pandas.Series({**res, **res["genderStats"], **res["clientEvents"]})
.drop(["genderStats", "clientEvents"])
.astype(int)
)
def get_program_client_stats(facility_id, program_id):
"""
Given a facility ID and a program ID, get the current client status.
Parameters
==========
facility_id: int
The facility ID
program_id: int
The program ID
Returns
=======
A pandas.Series with the client statistics for the facility program.
"""
TOKEN = Variable.get("GETHELP_OAUTH_PASSWORD")
res = make_get_help_request(
f"facilities/{facility_id}/facility-programs/{program_id}/client-statistics",
TOKEN,
paginated=False,
)
return (
pandas.Series({**res, **res["genderStats"], **res["clientEvents"]})
.drop(["genderStats", "clientEvents"])
.astype(int)
)
def agg_facility_programs(facility_id, program_list, match, prefix):
"""
Aggregate the current bed occupancy data for a list of programs,
filtering by program name.
Parameters
==========
facility_id: int
The facility id.
program_list: list
A list of programs of the shape returned by the GetHelp
facility-programs endpoint.
match: str
A string which is tested for inclusion in a program name
to decide whether to include a program in the statistics.
prefix:
A string to prefix series labels with.
Returns
=======
A pandas.Series with the aggregated statistics for the matching facility programs.
"""
# A sentinel timestamp which is used to determine whether
# any programs actually matched.
sentinel = pandas.Timestamp("2020-01-01T00:00:00Z")
last_updated = functools.reduce(
lambda x, y: (
max(x, pandas.Timestamp(y["lastUpdated"]))
if match in y["name"].lower()
else x
),
program_list,
sentinel,
)
if last_updated == sentinel:
# No programs matched, return early
return None
occupied = functools.reduce(
lambda x, y: x
+ (y["bedsOccupied"] + y["bedsPending"] if match in y["name"].lower() else 0),
program_list,
0,
)
total = functools.reduce(
lambda x, y: x + (y["bedsTotal"] if match in y["name"].lower() else 0),
program_list,
0,
)
available = total - occupied
client_stats = functools.reduce(
lambda x, y: x.add(
get_program_client_stats(facility_id, y["id"]), fill_value=0,
)
if match in y["name"].lower()
else x,
program_list,
pandas.Series(),
)
return pandas.Series(
{
prefix + "occupied": occupied,
prefix + "available": available,
prefix + "last_updated": last_updated,
}
).append(client_stats.rename(lambda x: prefix + x))
def get_facility_program_status(facility_id):
"""
Get the most recent status for a facility, broken
up into shelter beds, trailers, and safe parking.
Parameters
==========
facility_id: int
The facility ID.
Returns
=======
A pandas.Series with program statistics for shelter beds, safe
parking, and trailer beds.
"""
TOKEN = Variable.get("GETHELP_OAUTH_PASSWORD")
res = make_get_help_request(f"facilities/{facility_id}/facility-programs", TOKEN)
shelter_beds = agg_facility_programs(
facility_id, res, "shelter bed", "shelter_beds_"
)
isolation = agg_facility_programs(facility_id, res, "isolation", "isolation_")
trailers = agg_facility_programs(facility_id, res, "trailer", "trailers_")
safe_parking = agg_facility_programs(facility_id, res, "parking", "safe_parking_")
return pandas.concat([shelter_beds, isolation, trailers, safe_parking])
def get_facility_history(facility_id, start_date=None, end_date=None):
"""
Get the history stats of a given facility by ID.
Parameters
==========
facility_id: int
The ID of the facility.
start_date: datetime.date
The start date of the history (defaults to April 8, 2020)
end_date: datetme.date
The end date of the history (defaults to the present day)
Returns
=======
A dataframe with the history for the given facility.
"""
TOKEN = Variable.get("GETHELP_OAUTH_PASSWORD")
start_date = start_date or datetime.date(2020, 4, 8)
end_date = end_date or pandas.Timestamp.now(tz="US/Pacific").date()
# Get the shelter bed program ID
res = make_get_help_request(f"facilities/{facility_id}/facility-programs", TOKEN)
programs = pandas.io.json.json_normalize(res)
history = pandas.DataFrame()
if not len(programs):
return history
# Get the history stats for the shelter bed programs
for _, program in programs.iterrows():
program_id = program["id"]
res = make_get_help_request(
f"facilities/{facility_id}/facility-programs/{program_id}/statistics",
TOKEN,
params={"startDate": str(start_date), "endDate": str(end_date)},
)
program_history = pandas.io.json.json_normalize(res)
# Add ID column so we can filter by them later
program_history = program_history.assign(program_id=program_id)
history = history.append(program_history)
return history
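# Minimal sketch (hypothetical payload, not the real GetHelp schema) of how
# pandas.io.json.json_normalize flattens the statistics records that the loop
# above appends into a single history dataframe.
def _sketch_normalize_statistics():
    import pandas
    fake_records = [
        {"dataDate": "2020-04-08", "bedsOccupied": 10, "bedsTotal": 50},
        {"dataDate": "2020-04-09", "bedsOccupied": 12, "bedsTotal": 50},
    ]
    # One row per record, with a program_id column added as in get_facility_history.
    return pandas.io.json.json_normalize(fake_records).assign(program_id=1)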
def assemble_facility_history(facility):
"""
Given a facility, assemble its history stats into a dataframe.
This is the same as get_facility_history, but also adds some
additional columns from the facility data.
Parameters
==========
facility: pandas.Series
A row from the facilities dataframe
Returns
=======
A dataframe with facility history.
"""
print(f"Loading timeseries for {facility['name']}")
history = get_facility_history(facility["id"])
if not len(history):
return None
history = history.assign(
facility_id=facility["id"],
name=facility["name"],
phone=facility["phone"],
website=facility["website"],
address=facility["address1"],
city=facility["city"],
county=facility["county"],
state=facility["state"],
zipCode=facility["zipCode"],
latitude=facility["latitude"],
longitude=facility["longitude"],
district=facility["district"],
).drop(columns=["id"])
return history
def assemble_get_help_timeseries():
"""
Gets a full timeseries for all facilities managed by the GetHelp system.
"""
df = pandas.DataFrame()
facilities = get_facilities()
for idx, facility in facilities.iterrows():
history = assemble_facility_history(facility)
if history is not None:
df = df.append(history)
df = df.assign(
dataDate=pandas.to_datetime(df.dataDate)
.dt.tz_localize("US/Pacific")
.dt.tz_convert("UTC")
).sort_values(["facility_id", "dataDate"])
return df
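# Minimal sketch of the timezone handling above: dataDate values are parsed as
# naive timestamps, localized to US/Pacific, then converted to UTC.
def _sketch_localize_data_dates():
    import pandas
    s = pandas.to_datetime(pandas.Series(["2020-04-08", "2020-04-09"]))
    return s.dt.tz_localize("US/Pacific").dt.tz_convert("UTC")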
def load_get_help_data(**kwargs):
facilities = get_facilities().pipe(coerce_integer)
upload_to_esri(facilities, FACILITIES_ID, "/tmp/gethelp-facilities-v6.csv")
timeseries = assemble_get_help_timeseries()
upload_to_esri(timeseries, TIMESERIES_ID, "/tmp/gethelp-timeseries-v2.csv")
    # Compute the number of open and reporting shelter beds
active_facilities = facilities[facilities.status != 0]
stats = {
"n_shelters": len(facilities),
"n_shelters_status_known": len(active_facilities),
"n_shelters_with_available_beds": len(
active_facilities[active_facilities.status == 1]
),
"n_available_beds": active_facilities.availableBeds.sum(),
"n_occupied_beds": active_facilities.totalBeds.sum()
- active_facilities.availableBeds.sum(),
}
stats_df = pandas.DataFrame.from_dict(
stats, orient="index", columns=["Count"]
).transpose()
# TODO: Write an assert to make sure all rows are in resultant GDF
upload_to_esri(stats_df, STATS_ID, "/tmp/gethelp-stats.csv")
# push the tables into kwargs for email
kwargs["ti"].xcom_push(key="facilities", value=active_facilities)
kwargs["ti"].xcom_push(key="stats_df", value=stats_df)
def format_program_client_stats(row, prefix):
"""
Given a program in the facility DF (specified by string prefix),
format the client stats (gender, pets, ADA, EMS calls/visits).
Parameters:
===========
row: pandas.Series
The row of the df to format
prefix: str
The prefix for all the stats entries (e.g., 'trailers_', 'isolation_', etc)
Returns
=======
An HTML string of the formatted client stats.
"""
men = row[prefix + "MALE"] + row[prefix + "TRANSGENDER_F_TO_M"]
women = row[prefix + "FEMALE"] + row[prefix + "TRANSGENDER_M_TO_F"]
nonbinary = (
row[prefix + "DECLINED"] + row[prefix + "OTHER"] + row[prefix + "UNDEFINED"]
)
pets = row[prefix + "totalPets"]
ada = row[prefix + "totalAda"]
ems_calls = row[prefix + "EMS_CALL"]
ems_visits = row[prefix + "EMS_VISIT"]
return f"""
<p style="margin-top:2px; margin-bottom: 2px; margin-left: 16px">
Women: {women}
</p>
<p style="margin-top:2px; margin-bottom: 2px; margin-left: 16px">
Men: {men}
</p>
<p style="margin-top:2px; margin-bottom: 2px; margin-left: 16px">
Nonbinary/other/declined: {nonbinary}
</p>
<p style="margin-top:2px; margin-bottom: 2px; margin-left: 16px">
Pets: {pets}
</p>
<p style="margin-top:2px; margin-bottom: 2px; margin-left: 16px">
Clients with ADA needs: {ada}
</p>
<p style="margin-top:2px; margin-bottom: 2px; margin-left: 16px">
EMS calls (last 24 hours): {ems_calls}
</p>
<p style="margin-top:2px; margin-bottom: 2px; margin-left: 16px">
EMS visits (last 24 hours): {ems_visits}
</p>
"""
def format_table(row):
"""
    Returns nicely formatted HTML
    for each shelter row.
"""
shelter_name = row["name"]
district = row["district"]
    # A sentinel timestamp to compare against program updates; it should be
    # older than any real "last updated" value, so it can be used to detect
    # programs that have never been updated.
old_ts = pandas.Timestamp("2020-01-01T00:00:00Z")
# Shelter stats
shelter_occ = row["shelter_beds_occupied"]
shelter_avail = row["shelter_beds_available"]
shelter_updated = (
row["shelter_beds_last_updated"]
if not pandas.isna(row["shelter_beds_last_updated"])
else old_ts
)
# Isolation stats
isolation_occ = row.get("isolation_occupied", 0)
isolation_updated = (
row.get("isolation_last_updated", None)
if not pandas.isna(row.get("isolation_last_updated", None))
else old_ts
)
# Trailer stats
trailer_occ = row["trailers_occupied"]
trailer_avail = row["trailers_available"]
trailer_updated = (
row["trailers_last_updated"]
if not pandas.isna(row["trailers_last_updated"])
else old_ts
)
# Safe parking stats
safe_parking_occ = row["safe_parking_totalClients"]
safe_parking_updated = (
row["safe_parking_last_updated"]
        if not pandas.isna(row["safe_parking_last_updated"])
        else old_ts
    )
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import numpy as np
import pandas as pd
from adjustText import adjust_text
from pylab import cm
from matplotlib import colors
def PCA_var_explained_plots(adata):
n_rows = 1
n_cols = 2
fig = plt.figure(figsize=(n_cols*4.5, n_rows*3))
# variance explained
ax1 = fig.add_subplot(n_rows, n_cols, 1)
x1 = range(len(adata.uns['pca']['variance_ratio']))
y1 = adata.uns['pca']['variance_ratio']
ax1.scatter(x1, y1, s=3)
ax1.set_xlabel('PC'); ax1.set_ylabel('Fraction of variance explained')
ax1.set_title('Fraction of variance explained per PC')
    # cumulative variance explained
ax2 = fig.add_subplot(n_rows, n_cols, 2)
cml_var_explained = np.cumsum(adata.uns['pca']['variance_ratio'])
x2 = range(len(adata.uns['pca']['variance_ratio']))
y2 = cml_var_explained
ax2.scatter(x2, y2, s=4)
ax2.set_xlabel('PC')
ax2.set_ylabel('Cumulative fraction of variance explained')
ax2.set_title('Cumulative fraction of variance explained by PCs')
plt.tight_layout()
plt.show()
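# Side sketch (not called anywhere): given the per-PC variance ratios plotted
# above, the number of PCs needed to reach a target cumulative fraction can be
# read off the cumulative sum. The 0.9 default is an arbitrary example value.
def n_pcs_for_variance(variance_ratio, target=0.9):
    cml = np.cumsum(variance_ratio)
    return int(np.searchsorted(cml, target) + 1)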
def assign_to_red_or_black_group(x, y, x_cutoff, y_cutoff):
"""xcoord is coefficient (MAST already took log2). ycoord is -log10(pval). label is gene name."""
if abs(x) > x_cutoff and y > y_cutoff:
color = "red"
# x coordinate (coef) is set to 0 if one of the two groups has zero counts (in that case,
# a fold change cannot be calculated). We'll color these points with 'salmon' (similar to red)
elif abs(x) == 0 and y > y_cutoff:
color = "salmon"
else:
color = "black"
return color
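# Quick usage sketch for the color assignment above (cutoff values are arbitrary):
# a point is red only if |coef| > x_cutoff and -log10(pval) > y_cutoff, salmon if
# the coef is exactly 0 but the p-value passes, and black otherwise.
def _sketch_color_assignment():
    return [
        assign_to_red_or_black_group(x, y, x_cutoff=1.0, y_cutoff=2.0)
        for x, y in [(1.5, 3.0), (0.0, 3.0), (0.5, 1.0)]
    ]  # -> ["red", "salmon", "black"]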
def plot_volcano_plot(
dea_results,
x_cutoff,
y_cutoff,
title,
use_zscores=False,
plot_labels=True,
min_red_dots=None,
figsize=(15, 7.5),
show_plot=False,
):
"""makes volcano plot. title is title of plot. path is path to MAST output csv. cutoffs will determine
which dots will be colored red. plot_labels can be set to False if no labels are wanted, otherwise all
red dots will be labeled with their gene name. If min_red_dots is set to a number, the x_cutoff will be
decreased (with factor .9 every time) until at least min_red_dots are red. figsize is a tuple of size 2,
and determines size of the figure. Returns the figure."""
coefs = dea_results.loc[:, "coef"].copy()
xcoords = coefs.fillna(0)
if use_zscores:
pvals = dea_results.loc[:, "coef_Z"]
ycoords = pvals
else:
pvals = dea_results.loc[:, "pval_adj"].copy()
        # NOTE: p-values that are 0 (due to rounding) are set to the minimum non-zero value here
pvals[pvals == 0] = np.min(pvals[pvals != 0]) # np.nextafter(0, 1)
ycoords = -np.log10(pvals)
gene_names = dea_results.index.tolist()
colors = [
assign_to_red_or_black_group(x, y, x_cutoff, y_cutoff)
for x, y in zip(xcoords, ycoords)
]
# if min_red_dots is set (i.e. not None), check if enough points are labeled red. If not, adjust x cutoff:
    if min_red_dots is not None:
n_red_points = sum([x == "red" for x in colors])
while n_red_points < min_red_dots:
x_cutoff = 0.9 * x_cutoff # make x cutoff less stringent
# reevaluate color of points using new cutoff:
colors = [
assign_to_red_or_black_group(x, y, x_cutoff, y_cutoff)
for x, y in zip(xcoords, ycoords)
]
n_red_points = sum([x == "red" for x in colors])
# extract coordinates separately for red and black
black_coords = [
(x, y) for x, y, color in zip(xcoords, ycoords, colors) if color == "black"
]
red_coords = [
(x, y) for x, y, color in zip(xcoords, ycoords, colors) if color == "red"
]
salmon_coords = [
(x, y) for x, y, color in zip(xcoords, ycoords, colors) if color == "salmon"
]
fig, ax = plt.subplots(figsize=figsize)
plt.plot(
[x for x, y in black_coords],
[y for x, y in black_coords],
marker=".",
linestyle="",
color="royalblue",
)
plt.plot(
[x for x, y in salmon_coords],
[y for x, y in salmon_coords],
marker=".",
linestyle="",
color="salmon",
)
plt.plot(
[x for x, y in red_coords],
[y for x, y in red_coords],
marker=".",
linestyle="",
color="red",
)
    if plot_labels:
ten_lowest_salmon_pvals_gene_names = [
gene_name
for _, gene_name, color in sorted(zip(pvals, gene_names, colors))
if color == "salmon"
][:10]
        # label if color is red, or if color is salmon and the gene is one of the ten salmon genes with the lowest pvals
labels = [
plt.text(x, y, label, ha="center", va="center")
for x, y, color, label in zip(xcoords, ycoords, colors, gene_names)
if (
color in ["red"]
or (color == "salmon" and label in ten_lowest_salmon_pvals_gene_names)
)
]
adjust_text(labels)
plt.xlabel(
"coef (=log(fold chagne))",
fontsize=13,
)
if use_zscores:
plt.ylabel("Z-score based on stdev")
else:
plt.ylabel("-log10 adjusted p-value", fontsize=14)
plt.title(
title
+ " (n genes: "
+ str(len(gene_names))
+ ") \n x-cutoff="
+ str(round(x_cutoff, 2))
+ ", y-cutoff="
+ str(round(y_cutoff, 2)),
fontsize=16,
)
    if not show_plot:
plt.close()
return fig
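# Hedged usage sketch for plot_volcano_plot: dea_results is assumed to be a
# dataframe indexed by gene name with at least "coef" and "pval_adj" columns
# (plus "coef_Z" if use_zscores=True). The toy frame below is made up.
def _sketch_volcano_usage():
    toy = pd.DataFrame(
        {"coef": [2.0, -1.5, 0.1], "pval_adj": [1e-8, 1e-4, 0.3]},
        index=["GENE_A", "GENE_B", "GENE_C"],
    )
    return plot_volcano_plot(toy, x_cutoff=1.0, y_cutoff=2.0, title="toy example")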
def plot_bar_chart(
adata,
x_var,
y_var,
x_names=None,
y_names=None,
y_min=0,
return_fig=False,
cmap="tab20",
):
"""plots stacked bar chart.
Arguments
adata - anndata object
x_var - name of obs variable to use for x-axis
y_var - name of obs variable to use for y-axis
x_names - names of x groups to include, exclude all other groups
y_names - names of y groups to include, exclude all other groups
y_min - minimum percentage of group to be labeled in plots. If
percentage of a y_group is lower than this minimum in all
x_groups, then the y_group will be pooled under "other".
return_fig - (Boolean) whether to return matplotlib figure
cmap - name of matplotlib colormap
Returns:
matplotlib figure of barchart if return_fig is True. Otherwise nothing.
"""
bar_chart_df_abs = adata.obs.groupby([x_var, y_var]).agg(
{x_var: "count"}
) # calculate count of each y_var for each x_var
bar_chart_df = (
bar_chart_df_abs.groupby(level=0)
.apply(lambda x: x / float(x.sum()) * 100)
.unstack()
) # convert to percentages
# clean up columns/index
bar_chart_df.columns = bar_chart_df.columns.droplevel(0)
bar_chart_df.index.name = None
bar_chart_df.columns.name = None
# if y_min > 0, re-map y categories:
if y_min > 0:
# check which y variables never have a fraction above y_min
y_var_to_remove = (bar_chart_df >= y_min).sum(axis=0) == 0
y_var_remapping = dict()
for y_name, to_remove in zip(y_var_to_remove.index, y_var_to_remove.values):
if to_remove:
y_var_remapping[y_name] = "other"
else:
y_var_remapping[y_name] = y_name
adata.obs["y_temp"] = adata.obs[y_var].map(y_var_remapping)
# recalculate bar_chart_df, now using re-mapped y_var
bar_chart_df_abs = adata.obs.groupby([x_var, "y_temp"]).agg(
{x_var: "count"}
) # calculate count of each y_var for each x_var
bar_chart_df = (
bar_chart_df_abs.groupby(level=0)
.apply(lambda x: x / float(x.sum()) * 100)
.unstack()
) # convert to percentages
# clean up columns/index
bar_chart_df.columns = bar_chart_df.columns.droplevel(0)
bar_chart_df.index.name = None
bar_chart_df.columns.name = None
# prepare x and y variables for bar chart:
if x_names is None:
x_names = bar_chart_df.index
else:
if not set(x_names).issubset(adata.obs[x_var]):
raise ValueError("x_names should be a subset of adata.obs[x_var]!")
if y_names is None:
y_names = bar_chart_df.columns
else:
if not set(y_names).issubset(adata.obs[y_var]):
raise ValueError(
"y_names should be a subset of adata.obs[y_var]! (Note that this can be affected by your y_min setting.)"
)
# subset bar_chart_df based on x and y names:
bar_chart_df = bar_chart_df.loc[x_names, y_names]
x_len = len(x_names)
y_names = bar_chart_df.columns
y_len = len(y_names)
# setup colors
colormap = cm.get_cmap(cmap)
cols = [colors.rgb2hex(colormap(i)) for i in range(colormap.N)]
# set bar width
barWidth = 0.85
# plot figure
fig = plt.figure(figsize=(12, 3))
axs = []
# plot the bottom bars of the stacked bar chart
axs.append(
plt.bar(
range(len(x_names)),
bar_chart_df.loc[:, y_names[0]],
color=cols[0],
# edgecolor="white",
width=barWidth,
label=y_names[0],
)
)
# store the bars as bars_added, to know where next stack of bars should start
# in y-axis
bars_added = [bar_chart_df.loc[:, y_names[0]]]
# now loop through the remainder of the y categories and plot
for i, y in enumerate(y_names[1:]):
axs.append(
plt.bar(
                x=range(len(x_names)),  # x positions of the bars: 0, ..., n_bars - 1
height=bar_chart_df.loc[:, y], # height of current stack
bottom=[
sum(idx_list) for idx_list in zip(*bars_added)
], # where to start current stack
color=cols[i + 1],
# edgecolor="white",
width=barWidth,
label=y,
)
)
        # append plotted bars to bars_added variable
bars_added.append(bar_chart_df.loc[:, y])
# Custom x axis
plt.xticks(range(len(x_names)), x_names, rotation=90)
plt.xlabel(x_var)
# Add a legend
plt.legend(
axs[::-1],
[ax.get_label() for ax in axs][::-1],
loc="upper left",
bbox_to_anchor=(1, 1),
ncol=1,
)
# add y label:
plt.ylabel("percentage of cells")
# add title:
plt.title(f"{y_var} fractions per {x_var} group")
# Show graphic:
plt.show()
# return figure:
if return_fig:
return fig
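# Minimal sketch of the percentage table that plot_bar_chart stacks: count cells
# per (x, y) group, then normalize each x group to 100%, mirroring the groupby
# pattern used above. The toy obs frame stands in for adata.obs and is purely
# illustrative.
def _sketch_fraction_table():
    obs = pd.DataFrame(
        {"sample": ["s1", "s1", "s1", "s2", "s2"],
         "cell_type": ["T", "B", "T", "T", "B"]}
    )
    counts = obs.groupby(["sample", "cell_type"]).agg({"sample": "count"})
    fractions = (
        counts.groupby(level=0)
        .apply(lambda x: x / float(x.sum()) * 100)
        .unstack()
    )
    fractions.columns = fractions.columns.droplevel(0)
    return fractions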
def plot_dataset_statistics(
adata, return_fig=False, show=True, fontsize=10, figwidthscale=3, figheightscale=4
):
data_by_subject = adata.obs.groupby("subject_ID").agg(
{
"study": "first",
}
)
data_by_sample = adata.obs.groupby("sample").agg({"study": "first"})
n_figures = 3
n_cols = 3
n_rows = int(np.ceil(n_figures / n_cols))
fig = plt.figure(figsize=(figwidthscale * n_cols, figheightscale * n_rows))
fig_count = 0
# FIGURE
fig_count += 1
ax = fig.add_subplot(n_rows, n_cols, fig_count)
dataset_subj_freqs = data_by_subject.study.value_counts()
datasets_ordered = dataset_subj_freqs.index
ax.bar(dataset_subj_freqs.index, dataset_subj_freqs.values)
ax.set_title("subjects per study", fontsize=fontsize)
ax.set_ylabel("n subjects", fontsize=fontsize)
ax.tick_params(axis="x", rotation=90, labelsize=fontsize)
ax.tick_params(axis="y", labelsize=fontsize)
ax.grid(False)
# FIGURE
fig_count += 1
ax = fig.add_subplot(n_rows, n_cols, fig_count)
dataset_sample_freqs = data_by_sample.study.value_counts()
ax.bar(datasets_ordered, dataset_sample_freqs[datasets_ordered].values)
ax.set_title("samples per study", fontsize=fontsize)
ax.set_ylabel("n samples", fontsize=fontsize)
ax.tick_params(axis="x", rotation=90, labelsize=fontsize)
ax.tick_params(axis="y", labelsize=fontsize)
ax.grid(False)
# FIGURE
fig_count += 1
ax = fig.add_subplot(n_rows, n_cols, fig_count)
dataset_cell_freqs = adata.obs.study.value_counts()
ax.bar(datasets_ordered, dataset_cell_freqs[datasets_ordered].values)
ax.set_title("cells per study", fontsize=fontsize)
ax.set_ylabel("n cells", fontsize=fontsize)
ax.tick_params(axis="x", rotation=90, labelsize=fontsize)
ax.tick_params(axis="y", labelsize=fontsize)
ax.grid(False)
plt.tight_layout()
plt.grid(False)
if show:
plt.show()
plt.close()
if return_fig:
return fig
def plot_subject_statistics(
adata,
return_fig=False,
show=True,
fontsize=12,
figheight=5,
figwidth=5,
barwidth=0.10,
):
data_by_subject = adata.obs.groupby("subject_ID").agg(
{
"age": "first",
"BMI": "first",
"ethnicity": "first",
"sex": "first",
"smoking_status": "first",
}
)
fig = plt.figure(
figsize=(figwidth, figheight),
constrained_layout=True,
)
gs = GridSpec(12, 12, figure=fig)
fig_count = 0
# FIGURE 1 AGE
fig_count += 1
ax = fig.add_subplot(gs[:6, :6])
bins = np.arange(0, max(adata.obs.age), 5)
tick_idc = np.arange(0, len(bins), 4)
perc_annotated = int(
np.round(
100 - (data_by_subject.age.isnull().sum() / data_by_subject.shape[0] * 100),
0,
)
)
ax.hist(data_by_subject.age, bins=bins, rwidth=0.9)
print(f"age: {perc_annotated}% annotated")
ax.set_xlabel("age", fontsize=fontsize)
ax.set_xticks(bins[tick_idc])
ax.tick_params(labelsize=fontsize, bottom=True, left=True)
ax.set_ylabel("n subjects", fontsize=fontsize)
ax.grid(False)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
# FIGURE 2 BMI
fig_count += 1
ax = fig.add_subplot(gs[:6, -6:])
BMIs = data_by_subject.BMI.copy()
perc_annotated = int(round(100 - (BMIs.isna().sum() / len(BMIs) * 100)))
BMIs = BMIs[~BMIs.isna()]
bins = np.arange(np.floor(BMIs.min() / 2) * 2, BMIs.max(), 2)
tick_idc = np.arange(0, len(bins), 3)
    ax.hist(BMIs, bins=bins, rwidth=0.9)
print(f"BMI: {perc_annotated}% annotated")
ax.set_xlabel("BMI", fontsize=fontsize)
ax.set_ylabel("n subjects", fontsize=fontsize)
ax.set_xticks(bins[tick_idc])
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.tick_params(labelsize=fontsize, bottom=True, left=True)
ax.grid(False)
# FIGURE 3 SEX
fig_count += 1
ax = fig.add_subplot(gs[-6:, :3])
x_man = np.sum(data_by_subject.sex == "male")
x_woman = np.sum(data_by_subject.sex == "female")
perc_annotated = int(
np.round(
100
- sum([s == "nan" or pd.isnull(s) for s in data_by_subject.sex])
            / data_by_subject.shape[0]
* 100,
0,
)
)
ax.bar(
x=[0.25, 0.75],
tick_label=["male", "female"],
height=[x_man, x_woman],
width=barwidth * 5 / 3,
)
ax.set_xlim(left=0, right=1)
print(f"sex: {perc_annotated}% annotated)")
ax.tick_params("x", rotation=90, labelsize=fontsize, bottom=True, left=True)
ax.tick_params("y", labelsize=fontsize, bottom=True, left=True)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.set_ylabel("n subjects", fontsize=fontsize)
ax.set_xlabel("sex", fontsize=fontsize)
ax.grid(False)
# FIGURE 4 ETHNICITY
fig_count += 1
ax = fig.add_subplot(gs[-6:, 3:-4])
ethns = data_by_subject.ethnicity.copy()
perc_annotated = int(
np.round(
            100 - sum([e == "nan" or pd.isnull(e) for e in ethns])
            / data_by_subject.shape[0]
            * 100,
            0,
        )
    )
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.compat import product, range
import pandas as pd
from pandas import (
Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat,
date_range)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
class TestPivotTable(object):
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),
('A', 'd'), ('B', 'a'), ('B', 'b'),
('B', 'c'), ('B', 'd'), ('C', 'a'),
('C', 'b'), ('C', 'c'), ('C', 'd')],
names=['customer', 'product'])
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'],
dropna=True)
exp_index = pd.MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, 3, 4]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'C': range(0, 9)})
df['A'] = df['A'].astype(CDT(categories, ordered=False))
result = df.pivot_table(index='B', columns='A', values='C',
dropna=dropna)
expected_columns = Series(['a', 'b', 'c'], name='A')
expected_columns = expected_columns.astype(
CDT(categories, ordered=False))
expected_index = Series([1, 2, 3], name='B')
expected = DataFrame([[0, 3, 6],
[1, 4, 7],
[2, 5, 8]],
index=expected_index,
columns=expected_columns,)
if not dropna:
# add back the non observed to compare
expected = expected.reindex(
columns=Categorical(categories)).astype('float')
tm.assert_frame_equal(result, expected)
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
{'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
categories=['low', 'high'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3]},
index=pd.Index(
pd.Categorical.from_codes([0, 1],
categories=['low', 'high'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
# gh-21378
df = pd.DataFrame(
{'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],
categories=['low', 'high', 'left'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3, 0]},
index=pd.Index(
pd.Categorical.from_codes([0, 1, 2],
categories=['low', 'high', 'left'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'int64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64=2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'float64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns,values',
[('bool1', ['float1', 'float2']),
('bool1', ['float1', 'float2', 'bool1']),
('bool2', ['float1', 'float2', 'bool1'])])
def test_pivot_preserve_dtypes(self, columns, values):
# GH 7142 regression test
v = np.arange(5, dtype=np.float64)
df = DataFrame({'float1': v, 'float2': v + 2.0,
'bool1': v <= 2, 'bool2': v <= 3})
df_res = df.reset_index().pivot_table(
index='index', columns=columns, values=values)
result = dict(df_res.dtypes)
expected = {col: np.dtype('O') if col[0].startswith('b')
else np.dtype('float64') for col in df_res}
assert result == expected
def test_pivot_no_values(self):
# GH 14380
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
'2011-01-01', '2011-01-02'])
df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
index=idx)
res = df.pivot_table(index=df.index.month, columns=df.index.day)
exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
'dt': pd.date_range('2011-01-01', freq='D',
periods=5)},
index=idx)
res = df.pivot_table(index=df.index.month,
columns=pd.Grouper(key='dt', freq='M'))
exp_columns = pd.MultiIndex.from_tuples([('A',
pd.Timestamp('2011-01-31'))])
exp_columns.names = [None, 'dt']
exp = pd.DataFrame([3.25, 2.0],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
res = df.pivot_table(index=pd.Grouper(freq='A'),
columns=pd.Grouper(key='dt', freq='M'))
exp = pd.DataFrame([3],
index=pd.DatetimeIndex(['2011-12-31']),
columns=exp_columns)
tm.assert_frame_equal(res, exp)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
if method:
result = df.pivot('a', 'b', 'c')
else:
result = pd.pivot(df, 'a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),
'c': 100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
if method:
pv = df.pivot('a', 'b', 'c')
else:
pv = pd.pivot(df, 'a', 'b', 'c')
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row['a'], row['b']] == row['c']
if method:
result = df.pivot('b', 'a', 'c')
else:
result = pd.pivot(df, 'b', 'a', 'c')
tm.assert_frame_equal(result, pv.T)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=exp_col)
if method:
pv = df.pivot(index='dt1', columns='dt2')
else:
pv = pd.pivot(df, index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'],
name='dt2',
tz='Asia/Tokyo'))
if method:
pv = df.pivot(index='dt1', columns='dt2', values='data1')
else:
pv = pd.pivot(df, index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_periods(self, method):
df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D'),
pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D')],
'p2': [pd.Period('2013-01', 'M'),
pd.Period('2013-01', 'M'),
pd.Period('2013-02', 'M'),
pd.Period('2013-02', 'M')],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.PeriodIndex(['2013-01', '2013-02'] * 2,
name='p2', freq='M')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=exp_col)
if method:
pv = df.pivot(index='p1', columns='p2')
else:
pv = pd.pivot(df, index='p1', columns='p2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=pd.PeriodIndex(['2013-01', '2013-02'],
name='p2', freq='M'))
if method:
pv = df.pivot(index='p1', columns='p2', values='data1')
else:
pv = pd.pivot(df, index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('values', [
['baz', 'zoo'], np.array(['baz', 'zoo']),
pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='foo', columns='bar', values=values)
else:
result = pd.pivot(df, index='foo', columns='bar', values=values)
data = [[1, 2, 3, 'x', 'y', 'z'],
[4, 5, 6, 'q', 'w', 't']]
index = Index(data=['one', 'two'], name='foo')
columns = MultiIndex(levels=[['baz', 'zoo'], ['A', 'B', 'C']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
names=[None, 'bar'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('values', [
['bar', 'baz'], np.array(['bar', 'baz']),
pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values_nans(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='zoo', columns='foo', values=values)
else:
result = pd.pivot(df, index='zoo', columns='foo', values=values)
data = [[np.nan, 'A', np.nan, 4],
[np.nan, 'C', np.nan, 6],
[np.nan, 'B', np.nan, 5],
['A', np.nan, 1, np.nan],
['B', np.nan, 2, np.nan],
['C', np.nan, 3, np.nan]]
index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')
columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[None, 'foo'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
    @pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails '
'with KeyError GH#19966')
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_multiindex(self, method):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
data = [['one', 'A', 1, 'x'],
['one', 'B', 2, 'y'],
['one', 'C', 3, 'z'],
['two', 'A', 4, 'q'],
['two', 'B', 5, 'w'],
['two', 'C', 6, 't']]
columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(data=data, index=index, columns=columns, dtype='object')
if method:
result = df.pivot(index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
else:
result = pd.pivot(df,
index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
data = {'A': Series([1, 4], index=['one', 'two']),
'B': Series([2, 5], index=['one', 'two']),
'C': Series([3, 6], index=['one', 'two'])}
expected = DataFrame(data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tuple_of_values(self, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
with pytest.raises(KeyError):
# tuple is seen as a single column name
if method:
df.pivot(index='zoo', columns='foo', values=('bar', 'baz'))
else:
pd.pivot(df, index='zoo', columns='foo', values=('bar', 'baz'))
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
columns=['C'],
margins_col='All'):
col_margins = result.loc[result.index[:-1], margins_col]
expected_col_margins = self.data.groupby(index)[values_col].mean()
tm.assert_series_equal(col_margins, expected_col_margins,
check_names=False)
assert col_margins.name == margins_col
result = result.sort_index()
index_margins = result.loc[(margins_col, '')].iloc[:-1]
expected_ix_margins = self.data.groupby(columns)[values_col].mean()
tm.assert_series_equal(index_margins, expected_ix_margins,
check_names=False)
assert index_margins.name == (margins_col, '')
grand_total_margins = result.loc[(margins_col, ''), margins_col]
expected_total_margins = self.data[values_col].mean()
assert grand_total_margins == expected_total_margins
# column specified
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean)
_check_output(result, 'D')
# Set a different margins_name (not 'All')
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean,
margins_name='Totals')
_check_output(result, 'D', margins_col='Totals')
# no column specified
table = self.data.pivot_table(index=['A', 'B'], columns='C',
margins=True, aggfunc=np.mean)
for value_col in table.columns.levels[0]:
_check_output(table[value_col], value_col)
# no col
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc=np.mean)
for value_col in table.columns:
totals = table.loc[('All', ''), value_col]
assert totals == self.data[value_col].mean()
# no rows
rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,
aggfunc=np.mean)
assert isinstance(rtable, Series)
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc='mean')
for item in ['DD', 'EE', 'FF']:
totals = table.loc[('All', ''), item]
assert totals == self.data[item].mean()
def test_margins_dtype(self):
# GH 17013
df = self.data.copy()
df[['D', 'E', 'F']] = np.arange(len(df) * 3).reshape(len(df), 3)
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [12, 21, 3, 9, 45],
'shiny': [33, 0, 36, 51, 120]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = df.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=np.sum, fill_value=0)
tm.assert_frame_equal(expected, result)
@pytest.mark.xfail(reason='GH#17035 (len of floats is casted back to '
'floats)')
def test_margins_dtype_len(self):
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [1, 1, 2, 1, 5],
'shiny': [2, 0, 2, 2, 6]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=len, fill_value=0)
tm.assert_frame_equal(expected, result)
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
d = date.min
data = list(product(['foo', 'bar'], ['A', 'B', 'C'], ['x1', 'x2'],
[d + timedelta(i)
for i in range(20)], [1.0]))
df = DataFrame(data)
table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])
df2 = df.rename(columns=str)
table2 = df2.pivot_table(
values='4', index=['0', '1', '3'], columns=['2'])
tm.assert_frame_equal(table, table2, check_names=False)
def test_pivot_no_level_overlap(self):
# GH #1181
data = DataFrame({'a': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'] * 2,
'b': [0, 0, 0, 0, 1, 1, 1, 1] * 2,
'c': (['foo'] * 4 + ['bar'] * 4) * 2,
'value': np.random.randn(16)})
table = data.pivot_table('value', index='a', columns=['b', 'c'])
grouped = data.groupby(['a', 'b', 'c'])['value'].mean()
expected = grouped.unstack('b').unstack('c').dropna(axis=1, how='all')
tm.assert_frame_equal(table, expected)
def test_pivot_columns_lexsorted(self):
n = 10000
dtype = np.dtype([
("Index", object),
("Symbol", object),
("Year", int),
("Month", int),
("Day", int),
("Quantity", int),
("Price", float),
])
products = np.array([
('SP500', 'ADBE'),
('SP500', 'NVDA'),
('SP500', 'ORCL'),
('NDQ100', 'AAPL'),
('NDQ100', 'MSFT'),
('NDQ100', 'GOOG'),
('FTSE', 'DGE.L'),
('FTSE', 'TSCO.L'),
('FTSE', 'GSK.L'),
], dtype=[('Index', object), ('Symbol', object)])
items = np.empty(n, dtype=dtype)
iproduct = np.random.randint(0, len(products), n)
items['Index'] = products['Index'][iproduct]
items['Symbol'] = products['Symbol'][iproduct]
dr = pd.date_range(date(2000, 1, 1),
date(2010, 12, 31))
dates = dr[np.random.randint(0, len(dr), n)]
items['Year'] = dates.year
items['Month'] = dates.month
items['Day'] = dates.day
items['Price'] = np.random.lognormal(4.0, 2.0, n)
df = DataFrame(items)
pivoted = df.pivot_table('Price', index=['Month', 'Day'],
columns=['Index', 'Symbol', 'Year'],
aggfunc='mean')
assert pivoted.columns.is_monotonic
def test_pivot_complex_aggfunc(self):
f = OrderedDict([('D', ['std']), ('E', ['sum'])])
expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')
result = self.data.pivot_table(index='A', columns='B', aggfunc=f)
tm.assert_frame_equal(result, expected)
def test_margins_no_values_no_cols(self):
# Regression test on pivot table: no values or cols passed.
result = self.data[['A', 'B']].pivot_table(
index=['A', 'B'], aggfunc=len, margins=True)
result_list = result.tolist()
assert sum(result_list[:-1]) == result_list[-1]
def test_margins_no_values_two_rows(self):
# Regression test on pivot table: no values passed but rows are a
# multi-index
result = self.data[['A', 'B', 'C']].pivot_table(
index=['A', 'B'], columns='C', aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_margins_no_values_one_row_one_col(self):
# Regression test on pivot table: no values passed but row and col
# defined
result = self.data[['A', 'B']].pivot_table(
index='A', columns='B', aggfunc=len, margins=True)
assert result.All.tolist() == [4.0, 7.0, 11.0]
def test_margins_no_values_two_row_two_cols(self):
# Regression test on pivot table: no values passed but rows and cols
# are multi-indexed
self.data['D'] = ['a', 'b', 'c', 'd',
'e', 'f', 'g', 'h', 'i', 'j', 'k']
result = self.data[['A', 'B', 'C', 'D']].pivot_table(
index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_pivot_table_with_margins_set_margin_name(self):
# see gh-3335
for margin_name in ['foo', 'one', 666, None, ['a', 'b']]:
with pytest.raises(ValueError):
# multi-index index
pivot_table(self.data, values='D', index=['A', 'B'],
columns=['C'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# multi-index column
pivot_table(self.data, values='D', index=['C'],
columns=['A', 'B'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# non-multi-index index/column
pivot_table(self.data, values='D', index=['A'],
columns=['B'], margins=True,
margins_name=margin_name)
def test_pivot_timegrouper(self):
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': '<NAME> <NAME>'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 1, 1),
datetime(2013, 1, 1),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 12, 2),
datetime(2013, 12, 2), ]}).set_index('Date')
expected = DataFrame(np.array([10, 18, 3], dtype='int64')
.reshape(1, 3),
index=[datetime(2013, 12, 31)],
columns='<NAME>'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='A'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='A'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
expected = DataFrame(np.array([1, np.nan, 3, 9, 18, np.nan])
.reshape(2, 3),
index=[datetime(2013, 1, 1),
datetime(2013, 7, 1)],
columns='<NAME>'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='6MS'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
# passing the name
df = df.reset_index()
result = pivot_table(df, index=Grouper(freq='6MS', key='Date'),
columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(KeyError, lambda: pivot_table(
df, index=Grouper(freq='6MS', key='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(KeyError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', key='foo'),
values='Quantity', aggfunc=np.sum))
# passing the level
df = df.set_index('Date')
result = pivot_table(df, index=Grouper(freq='6MS', level='Date'),
columns='Buyer', values='Quantity',
aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', level='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(ValueError, lambda: pivot_table(
df, index=Grouper(freq='6MS', level='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(ValueError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', level='foo'),
values='Quantity', aggfunc=np.sum))
# double grouper
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': '<NAME>'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 11, 1, 13, 0), datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 11, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 10, 2, 12, 0),
datetime(2013, 12, 5, 14, 0)],
'PayDay': [datetime(2013, 10, 4, 0, 0),
datetime(2013, 10, 15, 13, 5),
datetime(2013, 9, 5, 20, 0),
datetime(2013, 11, 2, 10, 0),
datetime(2013, 10, 7, 20, 0),
datetime(2013, 9, 5, 10, 0),
datetime(2013, 12, 30, 12, 0),
datetime(2013, 11, 20, 14, 0), ]})
result = pivot_table(df, index=Grouper(freq='M', key='Date'),
columns=Grouper(freq='M', key='PayDay'),
values='Quantity', aggfunc=np.sum)
expected = DataFrame(np.array([np.nan, 3, np.nan, np.nan,
6, np.nan, 1, 9,
np.nan, 9, np.nan, np.nan, np.nan,
np.nan, 3, np.nan]).reshape(4, 4),
index=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)],
columns=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)])
expected.index.name = 'Date'
expected.columns.name = 'PayDay'
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=Grouper(freq='M', key='PayDay'),
columns=Grouper(freq='M', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
tuples = [(datetime(2013, 9, 30), datetime(2013, 10, 31)),
(datetime(2013, 10, 31),
datetime(2013, 9, 30)),
(datetime(2013, 10, 31),
datetime(2013, 11, 30)),
(datetime(2013, 10, 31),
datetime(2013, 12, 31)),
(datetime(2013, 11, 30),
datetime(2013, 10, 31)),
(datetime(2013, 12, 31), datetime(2013, 11, 30)), ]
idx = MultiIndex.from_tuples(tuples, names=['Date', 'PayDay'])
expected = DataFrame(np.array([3, np.nan, 6, np.nan, 1, np.nan,
9, np.nan, 9, np.nan,
np.nan, 3]).reshape(6, 2),
index=idx, columns=['A', 'B'])
expected.columns.name = 'Branch'
result = pivot_table(
df, index=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')], columns=['Branch'],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=['Branch'],
columns=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
def test_pivot_datetime_tz(self):
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'],
tz='US/Pacific', name='dt1')
exp_col1 = Index(['value1', 'value1'])
exp_col2 = Index(['a', 'b'], name='label')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 3], [1, 4], [2, 5]],
index=exp_idx, columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=[
'label'], values=['value1'])
tm.assert_frame_equal(result, expected)
exp_col1 = Index(['sum', 'sum', 'sum', 'sum',
'mean', 'mean', 'mean', 'mean'])
exp_col2 = Index(['value1', 'value1', 'value2', 'value2'] * 2)
exp_col3 = pd.DatetimeIndex(['2013-01-01 15:00:00',
'2013-02-01 15:00:00'] * 4,
tz='Asia/Tokyo', name='dt2')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2, exp_col3])
expected = DataFrame(np.array([[0, 3, 1, 2, 0, 3, 1, 2],
[1, 4, 2, 1, 1, 4, 2, 1],
[2, 5, 1, 2, 2, 5, 1, 2]],
dtype='int64'),
index=exp_idx,
columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=['dt2'],
values=['value1', 'value2'],
aggfunc=[np.sum, np.mean])
tm.assert_frame_equal(result, expected)
def test_pivot_dtaccessor(self):
# GH 8103
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d))
result = pivot_table(df, index='label', columns=df['dt1'].dt.hour,
values='value1')
exp_idx = Index(['a', 'b'], name='label')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=exp_idx,
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.month,
columns=df['dt1'].dt.hour,
values='value1')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=Index([1, 2], name='dt2'),
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.year.values,
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
exp_col = MultiIndex.from_arrays(
[[7, 7, 8, 8, 9, 9], [1, 2] * 3], names=['dt1', 'dt2'])
expected = DataFrame(np.array([[0, 3, 1, 4, 2, 5]], dtype='int64'),
index=[2013], columns=exp_col)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=np.array(['X', 'X', 'X',
'X', 'Y', 'Y']),
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
expected = DataFrame(np.array([[0, 3, 1, np.nan, 2, np.nan],
[np.nan, np.nan, np.nan,
4, np.nan, 5]]),
index=['X', 'Y'], columns=exp_col)
tm.assert_frame_equal(result, expected)
def test_daily(self):
rng = date_range('1/1/2000', '12/31/2004', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(DataFrame(ts), index=ts.index.year,
columns=ts.index.dayofyear)
annual.columns = annual.columns.droplevel(0)
doy = np.asarray(ts.index.dayofyear)
for i in range(1, 367):
subset = ts[doy == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_monthly(self):
rng = date_range('1/1/2000', '12/31/2004', freq='M')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(pd.DataFrame(ts), index=ts.index.year,
columns=ts.index.month)
annual.columns = annual.columns.droplevel(0)
month = ts.index.month
for i in range(1, 13):
subset = ts[month == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_pivot_table_with_iterator_values(self):
# GH 12017
aggs = {'D': 'sum', 'E': 'mean'}
pivot_values_list = pd.pivot_table(
self.data, index=['A'], values=list(aggs.keys()), aggfunc=aggs,
)
pivot_values_keys = pd.pivot_table(
self.data, index=['A'], values=aggs.keys(), aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_keys, pivot_values_list)
agg_values_gen = (value for value in aggs.keys())
pivot_values_gen = pd.pivot_table(
self.data, index=['A'], values=agg_values_gen, aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_gen, pivot_values_list)
def test_pivot_table_margins_name_with_aggfunc_list(self):
# GH 13354
margins_name = 'Weekly'
costs = pd.DataFrame(
{'item': ['bacon', 'cheese', 'bacon', 'cheese'],
'cost': [2.5, 4.5, 3.2, 3.3],
'day': ['M', 'M', 'T', 'T']}
)
table = costs.pivot_table(
index="item", columns="day", margins=True,
margins_name=margins_name, aggfunc=[np.mean, max]
)
ix = pd.Index(
['bacon', 'cheese', margins_name], dtype='object', name='item'
)
tups = [('mean', 'cost', 'M'), ('mean', 'cost', 'T'),
('mean', 'cost', margins_name), ('max', 'cost', 'M'),
('max', 'cost', 'T'), ('max', 'cost', margins_name)]
cols = pd.MultiIndex.from_tuples(tups, names=[None, None, 'day'])
expected = pd.DataFrame(table.values, index=ix, columns=cols)
tm.assert_frame_equal(table, expected)
@pytest.mark.xfail(reason='GH#17035 (np.mean of ints is casted back to '
'ints)')
def test_categorical_margins(self, observed):
# GH 10989
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
@pytest.mark.xfail(reason='GH#17035 (np.mean of ints is casted back to '
'ints)')
def test_categorical_margins_category(self, observed):
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
df.y = df.y.astype('category')
df.z = df.z.astype('category')
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
def test_categorical_aggfunc(self, observed):
# GH 9534
df = pd.DataFrame({"C1": ["A", "B", "C", "C"],
"C2": ["a", "a", "b", "b"],
"V": [1, 2, 3, 4]})
df["C1"] = df["C1"].astype("category")
result = df.pivot_table("V", index="C1", columns="C2",
dropna=observed, aggfunc="count")
expected_index = pd.CategoricalIndex(['A', 'B', 'C'],
categories=['A', 'B', 'C'],
ordered=False,
name='C1')
expected_columns = pd.Index(['a', 'b'], name='C2')
expected_data = np.array([[1., np.nan],
[1., np.nan],
[np.nan, 2.]])
expected = pd.DataFrame(expected_data,
index=expected_index,
columns=expected_columns)
tm.assert_frame_equal(result, expected)
def test_categorical_pivot_index_ordering(self, observed):
# GH 8731
df = pd.DataFrame({'Sales': [100, 120, 220],
'Month': ['January', 'January', 'January'],
'Year': [2013, 2014, 2013]})
months = ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November',
'December']
df['Month'] = df['Month'].astype('category').cat.set_categories(months)
result = df.pivot_table(values='Sales',
index='Month',
columns='Year',
dropna=observed,
aggfunc='sum')
expected_columns = pd.Int64Index([2013, 2014], name='Year')
expected_index = pd.CategoricalIndex(['January'],
categories=months,
ordered=False,
name='Month')
expected = pd.DataFrame([[320, 120]],
index=expected_index,
columns=expected_columns)
if not observed:
result = result.dropna().astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_pivot_table_not_series(self):
# GH 4386
# pivot_table always returns a DataFrame
# when values is not list like and columns is None
# and aggfunc is not instance of list
df = DataFrame({'col1': [3, 4, 5],
'col2': ['C', 'D', 'E'],
'col3': [1, 3, 9]})
result = df.pivot_table('col1', index=['col3', 'col2'], aggfunc=np.sum)
m = MultiIndex.from_arrays([[1, 3, 9],
['C', 'D', 'E']],
names=['col3', 'col2'])
expected = DataFrame([3, 4, 5],
index=m, columns=['col1'])
tm.assert_frame_equal(result, expected)
result = df.pivot_table(
'col1', index='col3', columns='col2', aggfunc=np.sum
)
expected = DataFrame([[3, np.NaN, np.NaN],
[np.NaN, 4, np.NaN],
[np.NaN, np.NaN, 5]],
index=Index([1, 3, 9], name='col3'),
columns=Index(['C', 'D', 'E'], name='col2'))
tm.assert_frame_equal(result, expected)
result = df.pivot_table('col1', index='col3', aggfunc=[np.sum])
m = MultiIndex.from_arrays([['sum'],
['col1']])
expected = DataFrame([3, 4, 5],
index=Index([1, 3, 9], name='col3'),
columns=m)
tm.assert_frame_equal(result, expected)
def test_pivot_margins_name_unicode(self):
# issue #13292
greek = u'\u0394\u03bf\u03ba\u03b9\u03bc\u03ae'
frame = pd.DataFrame({'foo': [1, 2, 3]})
table = pd.pivot_table(frame, index=['foo'], aggfunc=len, margins=True,
margins_name=greek)
index = pd.Index([1, 2, 3, greek], dtype='object', name='foo')
expected = pd.DataFrame(index=index)
tm.assert_frame_equal(table, expected)
def test_pivot_string_as_func(self):
# GH #18713
# for correctness purposes
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar',
'bar', 'bar', 'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one',
'one', 'two', 'two', 'two', 'one'],
'C': range(11)})
result = pivot_table(data, index='A', columns='B', aggfunc='sum')
mi = MultiIndex(levels=[['C'], ['one', 'two']],
codes=[[0, 0], [0, 1]], names=[None, 'B'])
expected = DataFrame({('C', 'one'): {'bar': 15, 'foo': 13},
('C', 'two'): {'bar': 7, 'foo': 20}},
columns=mi).rename_axis('A')
tm.assert_frame_equal(result, expected)
result = pivot_table(data, index='A', columns='B',
aggfunc=['sum', 'mean'])
mi = MultiIndex(levels=[['sum', 'mean'], ['C'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 1]],
names=[None, None, 'B'])
expected = DataFrame({('mean', 'C', 'one'): {'bar': 5.0, 'foo': 3.25},
('mean', 'C', 'two'): {'bar': 7.0,
'foo': 6.666666666666667},
('sum', 'C', 'one'): {'bar': 15, 'foo': 13},
('sum', 'C', 'two'): {'bar': 7, 'foo': 20}},
columns=mi).rename_axis('A')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('f, f_numpy',
[('sum', np.sum),
('mean', np.mean),
('std', np.std),
(['sum', 'mean'], [np.sum, np.mean]),
(['sum', 'std'], [np.sum, np.std]),
(['std', 'mean'], [np.std, np.mean])])
def test_pivot_string_func_vs_func(self, f, f_numpy):
# GH #18713
# for consistency purposes
result = pivot_table(self.data, index='A', columns='B', aggfunc=f)
expected = pivot_table(self.data, index='A', columns='B',
aggfunc=f_numpy)
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_pivot_number_of_levels_larger_than_int32(self):
# GH 20601
df = DataFrame({'ind1': np.arange(2 ** 16),
'ind2': np.arange(2 ** 16),
'count': 0})
with pytest.raises(ValueError, match='int32 overflow'):
df.pivot_table(index='ind1', columns='ind2',
values='count', aggfunc='count')
class TestCrosstab(object):
def setup_method(self, method):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
self.df = df.append(df, ignore_index=True)
def test_crosstab_single(self):
df = self.df
result = crosstab(df['A'], df['C'])
expected = df.groupby(['A', 'C']).size().unstack()
tm.assert_frame_equal(result, expected.fillna(0).astype(np.int64))
def test_crosstab_multiple(self):
df = self.df
result = crosstab(df['A'], [df['B'], df['C']])
expected = df.groupby(['A', 'B', 'C']).size()
expected = expected.unstack(
'B').unstack('C').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
result = crosstab([df['B'], df['C']], df['A'])
expected = df.groupby(['B', 'C', 'A']).size()
expected = expected.unstack('A').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_crosstab_ndarray(self):
a = np.random.randint(0, 5, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 10, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'))
expected = crosstab(df['a'], [df['b'], df['c']])
tm.assert_frame_equal(result, expected)
result = crosstab([b, c], a, colnames=['a'], rownames=('b', 'c'))
expected = crosstab([df['b'], df['c']], df['a'])
tm.assert_frame_equal(result, expected)
# assign arbitrary names
result = crosstab(self.df['A'].values, self.df['C'].values)
assert result.index.name == 'row_0'
assert result.columns.name == 'col_0'
def test_crosstab_non_aligned(self):
# GH 17005
a = pd.Series([0, 1, 1], index=['a', 'b', 'c'])
b = pd.Series([3, 4, 3, 4, 3], index=['a', 'b', 'c', 'd', 'f'])
c = np.array([3, 4, 3])
expected = pd.DataFrame([[1, 0], [1, 1]],
index=Index([0, 1], name='row_0'),
columns=Index([3, 4], name='col_0'))
result = crosstab(a, b)
tm.assert_frame_equal(result, expected)
result = crosstab(a, c)
tm.assert_frame_equal(result, expected)
def test_crosstab_margins(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True)
assert result.index.names == ('a',)
assert result.columns.names == ['b', 'c']
all_cols = result['All', '']
exp_cols = df.groupby(['a']).size().astype('i8')
# to keep index.name
exp_margin = Series([len(df)], index=Index(['All'], name='a'))
exp_cols = exp_cols.append(exp_margin)
exp_cols.name = ('All', '')
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.loc['All']
exp_rows = df.groupby(['b', 'c']).size().astype('i8')
exp_rows = exp_rows.append(Series([len(df)], index=[('All', '')]))
exp_rows.name = 'All'
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
tm.assert_series_equal(all_rows, exp_rows)
def test_crosstab_margins_set_margin_name(self):
# GH 15972
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True, margins_name='TOTAL')
assert result.index.names == ('a',)
assert result.columns.names == ['b', 'c']
all_cols = result['TOTAL', '']
exp_cols = df.groupby(['a']).size().astype('i8')
# to keep index.name
exp_margin = Series([len(df)], index=Index(['TOTAL'], name='a'))
exp_cols = exp_cols.append(exp_margin)
exp_cols.name = ('TOTAL', '')
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.loc['TOTAL']
exp_rows = df.groupby(['b', 'c']).size().astype('i8')
exp_rows = exp_rows.append(Series([len(df)], index=[('TOTAL', '')]))
exp_rows.name = 'TOTAL'
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
tm.assert_series_equal(all_rows, exp_rows)
for margins_name in [666, None, ['a', 'b']]:
with pytest.raises(ValueError):
crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True, margins_name=margins_name)
def test_crosstab_pass_values(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
values = np.random.randn(100)
table = crosstab([a, b], c, values, aggfunc=np.sum,
rownames=['foo', 'bar'], colnames=['baz'])
df = DataFrame({'foo': a, 'bar': b, 'baz': c, 'values': values})
expected = df.pivot_table('values', index=['foo', 'bar'],
columns='baz', aggfunc=np.sum)
tm.assert_frame_equal(table, expected)
def test_crosstab_dropna(self):
# GH 3820
a = np.array(['foo', 'foo', 'foo', 'bar',
'bar', 'foo', 'foo'], dtype=object)
b = np.array(['one', 'one', 'two', 'one',
'two', 'two', 'two'], dtype=object)
c = np.array(['dull', 'dull', 'dull', 'dull',
'dull', 'shiny', 'shiny'], dtype=object)
res = pd.crosstab(a, [b, c], rownames=['a'],
colnames=['b', 'c'], dropna=False)
m = MultiIndex.from_tuples([('one', 'dull'), ('one', 'shiny'),
('two', 'dull'), ('two', 'shiny')],
names=['b', 'c'])
tm.assert_index_equal(res.columns, m)
def test_crosstab_no_overlap(self):
# GH 10291
s1 = pd.Series([1, 2, 3], index=[1, 2, 3])
s2 = pd.Series([4, 5, 6], index=[4, 5, 6])
actual = crosstab(s1, s2)
expected = pd.DataFrame()
tm.assert_frame_equal(actual, expected)
def test_margin_dropna(self):
# GH 12577
# pivot_table counts null into margin ('All')
# when margins=True and dropna=True
df = pd.DataFrame({'a': [1, 2, 2, 2, 2, np.nan],
'b': [3, 3, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [1, 3, 4], [2, 3, 5]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3, 4, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
df = DataFrame({'a': [1, np.nan, np.nan, np.nan, 2, np.nan],
'b': [3, np.nan, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3.0, 4.0, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
df = DataFrame({'a': [1, np.nan, np.nan, np.nan, np.nan, 2],
'b': [3, 3, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3, 4, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
# GH 12642
# _add_margins raises KeyError: Level None not found
# when margins=True and dropna=False
df = pd.DataFrame({'a': [1, 2, 2, 2, 2, np.nan],
'b': [3, 3, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=False)
expected = pd.DataFrame([[1, 0, 1], [1, 3, 4], [2, 4, 6]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3, 4, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
df = DataFrame({'a': [1, np.nan, np.nan, np.nan, 2, np.nan],
'b': [3, np.nan, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=False)
expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 4, 6]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3.0, 4.0, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
a = np.array(['foo', 'foo', 'foo', 'bar',
'bar', 'foo', 'foo'], dtype=object)
b = np.array(['one', 'one', 'two', 'one',
'two', np.nan, 'two'], dtype=object)
c = np.array(['dull', 'dull', 'dull', 'dull',
'dull', 'shiny', 'shiny'], dtype=object)
actual = pd.crosstab(a, [b, c], rownames=['a'],
colnames=['b', 'c'], margins=True, dropna=False)
m = MultiIndex.from_arrays([['one', 'one', 'two', 'two', 'All'],
['dull', 'shiny', 'dull', 'shiny', '']],
names=['b', 'c'])
expected = DataFrame([[1, 0, 1, 0, 2], [2, 0, 1, 1, 5],
[3, 0, 2, 1, 7]], columns=m)
expected.index = Index(['bar', 'foo', 'All'], name='a')
tm.assert_frame_equal(actual, expected)
actual = pd.crosstab([a, b], c, rownames=['a', 'b'],
colnames=['c'], margins=True, dropna=False)
m = MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo', 'All'],
['one', 'two', 'one', 'two', '']],
names=['a', 'b'])
expected = DataFrame([[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2],
[5, 2, 7]], index=m)
expected.columns = Index(['dull', 'shiny', 'All'], name='c')
tm.assert_frame_equal(actual, expected)
actual = pd.crosstab([a, b], c, rownames=['a', 'b'],
colnames=['c'], margins=True, dropna=True)
m = MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo', 'All'],
['one', 'two', 'one', 'two', '']],
names=['a', 'b'])
expected = DataFrame([[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2],
[5, 1, 6]], index=m)
expected.columns = Index(['dull', 'shiny', 'All'], name='c')
tm.assert_frame_equal(actual, expected)
def test_crosstab_normalize(self):
# Issue 12578
df = pd.DataFrame({'a': [1, 2, 2, 2, 2], 'b': [3, 3, 4, 4, 4],
'c': [1, 1, np.nan, 1, 1]})
rindex = pd.Index([1, 2], name='a')
cindex = pd.Index([3, 4], name='b')
full_normal = pd.DataFrame([[0.2, 0], [0.2, 0.6]],
index=rindex, columns=cindex)
row_normal = pd.DataFrame([[1.0, 0], [0.25, 0.75]],
index=rindex, columns=cindex)
col_normal = pd.DataFrame([[0.5, 0], [0.5, 1.0]],
index=rindex, columns=cindex)
# Check all normalize args
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='all'),
full_normal)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=True),
full_normal)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='index'),
row_normal)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='columns'),
col_normal)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=1),
|
pd.crosstab(df.a, df.b, normalize='columns')
|
pandas.crosstab
|
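As a quick illustration of the API named above, here is a minimal, self-contained sketch (not part of the test file) of what the `normalize` options of `pd.crosstab` do; the small frame mirrors the values used in the test but the snippet itself is illustrative only.
import pandas as pd

demo = pd.DataFrame({'a': [1, 2, 2, 2, 2], 'b': [3, 3, 4, 4, 4]})
# 'all' (or True) divides every cell by the grand total
print(pd.crosstab(demo.a, demo.b, normalize='all'))
# 'index' divides each row by its row total
print(pd.crosstab(demo.a, demo.b, normalize='index'))
# 'columns' (equivalently normalize=1) divides each column by its column total
print(pd.crosstab(demo.a, demo.b, normalize='columns'))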
from typing import List
import logging
import json
import random
import pandas as pd
from tqdm import tqdm
from haystack.schema import Document, Label
from haystack.modeling.data_handler.processor import _read_squad_file
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
tqdm.pandas()
COLUMN_NAMES = ["title", "context", "question", "id", "answer_text", "answer_start", "is_impossible"]
class SquadData:
"""
This class is designed to manipulate data that is in SQuAD format
"""
def __init__(self, squad_data):
"""
:param squad_data: SQuAD format data, either as a dict with a `data` key, or just a list of SQuAD documents
"""
if type(squad_data) == dict:
self.version = squad_data.get("version")
self.data = squad_data["data"]
elif type(squad_data) == list:
self.version = None
self.data = squad_data
self.df = self.to_df(self.data)
def merge_from_file(self, filename: str):
"""Merge the contents of a SQuAD format json file with the data stored in this object"""
new_data = json.load(open(filename))["data"]
self.merge(new_data)
def merge(self, new_data: List):
"""
Merge data in SQuAD format with the data stored in this object
:param new_data: A list of SQuAD document data
"""
df_new = self.to_df(new_data)
self.df = pd.concat([df_new, self.df])
self.data = self.df_to_data(self.df)
@classmethod
def from_file(cls, filename: str):
"""
Create a SquadData object by providing the name of a SQuAD format json file
"""
data = json.load(open(filename))
return cls(data)
def save(self, filename: str):
"""
Write the data stored in this object to a json file.
"""
with open(filename, "w") as f:
squad_data = {"version": self.version, "data": self.data}
json.dump(squad_data, f, indent=2)
def to_dpr_dataset(self):
raise NotImplementedError(
"SquadData.to_dpr_dataset() not yet implemented. "
"For now, have a look at the script at haystack/retriever/squad_to_dpr.py"
)
def to_document_objs(self):
"""
Export all paragraphs stored in this object to haystack.Document objects.
"""
df_docs = self.df[["title", "context"]]
df_docs = df_docs.drop_duplicates()
record_dicts = df_docs.to_dict("records")
documents = [Document(content=rd["context"], id=rd["title"]) for rd in record_dicts]
return documents
# TODO refactor to new Label objects
def to_label_objs(self):
"""
Export all labels stored in this object to haystack.Label objects.
"""
df_labels = self.df[["id", "question", "answer_text", "answer_start"]]
record_dicts = df_labels.to_dict("records")
labels = [
Label(
query=rd["question"],
answer=rd["answer_text"],
is_correct_answer=True,
is_correct_document=True,
id=rd["id"],
origin=rd.get("origin", "SquadData tool"),
document_id=rd.get("document_id", None),
)
for rd in record_dicts
]
return labels
@staticmethod
def to_df(data):
"""Convert a list of SQuAD document dictionaries into a pandas dataframe (each row is one annotation)"""
flat = []
for document in data:
title = document["title"]
for paragraph in document["paragraphs"]:
context = paragraph["context"]
for question in paragraph["qas"]:
q = question["question"]
id = question["id"]
is_impossible = question["is_impossible"]
# For no_answer samples
if len(question["answers"]) == 0:
flat.append(
{
"title": title,
"context": context,
"question": q,
"id": id,
"answer_text": "",
"answer_start": None,
"is_impossible": is_impossible,
}
)
# For span answer samples
else:
for answer in question["answers"]:
answer_text = answer["text"]
answer_start = answer["answer_start"]
flat.append(
{
"title": title,
"context": context,
"question": q,
"id": id,
"answer_text": answer_text,
"answer_start": answer_start,
"is_impossible": is_impossible,
}
)
df = pd.DataFrame.from_records(flat)
return df
def count(self, unit="questions"):
"""
Count the samples in the data. Choose from unit = "paragraphs", "questions", "answers", "no_answers", "span_answers"
"""
c = 0
for document in self.data:
for paragraph in document["paragraphs"]:
if unit == "paragraphs":
c += 1
for question in paragraph["qas"]:
if unit == "questions":
c += 1
# Count no_answers
if len(question["answers"]) == 0:
if unit in ["answers", "no_answers"]:
c += 1
# Count span answers
else:
for answer in question["answers"]:
if unit in ["answers", "span_answers"]:
c += 1
return c
@classmethod
def df_to_data(cls, df):
"""
Convert a dataframe into SQuAD format data (list of SQuAD document dictionaries).
"""
logger.info("Converting data frame to squad format data")
# Aggregate the answers of each question
logger.info("Aggregating the answers of each question")
df_grouped_answers = df.groupby(["title", "context", "question", "id", "is_impossible"])
df_aggregated_answers = (
df[["title", "context", "question", "id", "is_impossible"]].drop_duplicates().reset_index()
)
answers = df_grouped_answers.progress_apply(cls._aggregate_answers).rename("answers")
answers = pd.DataFrame(answers).reset_index()
df_aggregated_answers = pd.merge(df_aggregated_answers, answers)
# Aggregate the questions of each passage
logger.info("Aggregating the questions of each paragraphs of each document")
df_grouped_questions = df_aggregated_answers.groupby(["title", "context"])
df_aggregated_questions = df[["title", "context"]].drop_duplicates().reset_index()
questions = df_grouped_questions.progress_apply(cls._aggregate_questions).rename("qas")
questions = pd.DataFrame(questions).reset_index()
df_aggregated_questions =
|
pd.merge(df_aggregated_questions, questions)
|
pandas.merge
|
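For context on the completion above, a small sketch (with invented data) of the default behaviour of `pd.merge` that the SquadData conversion relies on: with no keys given, it performs an inner join on every column name the two frames share.
import pandas as pd

questions = pd.DataFrame({'title': ['t1', 't1', 't2'],
                          'context': ['c1', 'c1', 'c2'],
                          'question': ['q1', 'q2', 'q3']})
answers = pd.DataFrame({'title': ['t1', 't2'],
                        'context': ['c1', 'c2'],
                        'answers': [[{'text': 'a1'}], [{'text': 'a2'}]]})
# inner join on the shared 'title' and 'context' columns
merged = pd.merge(questions, answers)
print(merged)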
"""
Compare temperature readings in the R-cubed riser for different catalyst flow
rates. Run this program for each low, mid, and high process gas flow.
Examples
--------
The first argument is the process gas flow group: low, mid, or high. The
second argument is the temperature column name: te709c, te709b, te709a,
te707c, te707b, te707a, te705, or te701.
>>> python temp_cat.py low te709c
>>> python temp_cat.py mid te709c
>>> python temp_cat.py high te705
"""
import argparse
import matplotlib.pyplot as plt
import os
import pandas as pd
from utils import df_experiment, stats, config, config_subplot
# Command line argument
# ----------------------------------------------------------------------------
# input low, mid, or high for process gas experiments
parser = argparse.ArgumentParser()
parser.add_argument('pg', help='process gas group as low, mid, high')
parser.add_argument('col', help='column name as TE709C, TE705 etc.')
args = parser.parse_args()
n = ('low', 'mid', 'high').index(args.pg)
col = args.col.upper()
# Parameters for process gas flow experiments
# ----------------------------------------------------------------------------
# experiments are referred to as the item numbers in the experimental matrix
# each experiment was repeated three times
# items = | low gas flow | mid gas flow | high gas flow |
nocat_items = ('001', '005', '009'), ('013', '017', '021'), ('025', '029', '033')
lowcat_items = ('002', '006', '010'), ('014', '018', '022'), ('026', '030', '034')
midcat_items = ('003', '007', '011'), ('015', '019', '023'), ('027', '031', '035')
hicat_items = ('004', '008', '012'), ('016', '020', '024'), ('028', '032', '036')
# catalyst flow rates as reported for each experiment [kg/hr]
# catalyst flows for the third mid-gas experiment were not reported, so assumed values are used
# items = | low gas flow | mid gas flow | high gas flow |
lowcat = (46.8, 49.5, 44.3), (45.8, 45.1, 47.3), (53.2, 46.9, 56.5)
midcat = (91.3, 95.3, 91.6), (89.2, 95.1, 94.6), (102.2, 98.4, 97.1)
hicat = (137.6, 136.5, 141.7), (131.8, 138.2, 141.3), (140.1, 140.9, 144.8)
# Analyze process gas flow data from thermocouple TE709C
# ----------------------------------------------------------------------------
h19_files = [f for f in os.listdir('processed-hydro') if f.endswith('h19.csv')]
df_nocat = df_experiment(col, nocat_items[n], h19_files)
df_lowcat = df_experiment(col, lowcat_items[n], h19_files)
df_midcat = df_experiment(col, midcat_items[n], h19_files)
df_hicat = df_experiment(col, hicat_items[n], h19_files)
# stats from experimental data
df_stats =
|
pd.DataFrame(columns=['start', 'stop', 'catflow', 'mean', 'std', 'max', 'min'])
|
pandas.DataFrame
|
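A brief sketch of one common way to use the empty, column-labelled DataFrame created in the completion above: fill it one labelled row at a time with `.loc`. This is not necessarily what the imported `stats` helper does; the row labels and numbers below are placeholders, not experimental values.
import pandas as pd

df_stats = pd.DataFrame(columns=['start', 'stop', 'catflow', 'mean', 'std', 'max', 'min'])
df_stats.loc['exp-001'] = [0, 100, 46.8, 512.3, 4.1, 520.0, 505.2]   # placeholder numbers
df_stats.loc['exp-005'] = [0, 100, 49.5, 515.7, 3.8, 522.1, 508.9]   # placeholder numbers
print(df_stats)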
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 26 21:29:34 2020
@author: the_squad
"""
#!pip install bert-for-tf2
#!pip install sentencepiece
#!pip install bert
import collections
import json
import pandas as pd
from math import floor
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras.models import Model  # Keras is the high-level API for TensorFlow
import numpy as np
import os
import regex as re
import bert
from tensorflow.keras.layers import Concatenate
import pickle
#!export CUDA_VISIBLE_DEVICES=0
#physical_devices = tf.config.experimental.list_physical_devices('GPU')
#for physical_device in physical_devices:
# tf.config.experimental.set_memory_growth(physical_device, True)
#os.chdir('/home/pablo/Desktop/test')
#x = "/home/pablo/Desktop/test/"
os.chdir('/users/paula/scratch/val2')
x = "/users/paula/scratch/val2"
subdirs = [os.path.join(x, o) for o in os.listdir(x) if os.path.isdir(os.path.join(x,o))]
new_dirs=[]
files = []
for dirs in subdirs:
# r=root, d=directories, f = files
for r, d, f in os.walk(dirs):
for file in f:
files.append(os.path.join(r, file))
new_dirs.append(os.path.split(dirs)[1])
df = pd.DataFrame(list(zip(files, new_dirs)), columns=['metadata_file','target'])
# To list all files and their full paths (without extensions):
xyz=pd.Series()
for i in np.arange(len(df)):
df['metadata_file'][i] = df['metadata_file'][i].split(".")[0]
df = df.drop_duplicates().copy()
filelist = list(df.metadata_file)
classlist = list(df.target)
descriptions=[]
ids=[]
targets=[]
pathz=[]
new_dirs=[]
titles=[]
for file in filelist:
with open(file) as f:
json_data = json.load(f)
descriptions.append(json_data['description'])
ids.append(json_data['id'])
titles.append(json_data['title'])
new_titles = []
new_descriptions = []
# encode as ascii, decode to convert back to characters from bytes. Also, do regex cleanup on special characters not indicative of meaning
for title in titles:
new_titles.append(re.sub(r"[^a-zA-Z?.!,¿#@]+", " ",title.encode('ascii',errors='ignore').decode('UTF-8')))
for description in descriptions:
new_descriptions.append(re.sub(r"[^a-zA-Z?.!,¿#@]+", " ",description.encode('ascii',errors='ignore').decode('UTF-8')))
for i in range(len(new_titles)):
new_titles[i] = new_titles[i].lower()
for i in range(len(new_descriptions)):
new_descriptions[i] = new_descriptions[i].lower()
for i in range(len(new_titles)):
new_titles[i] = new_titles[i].lower()
for i in range(len(new_descriptions)):
new_descriptions[i] = new_descriptions[i].lower()
targets = pd.Series(df['target']).reset_index(drop=True)
# put words into a dictionary for downstream use
def build_dataset(words):
count = collections.Counter(words).most_common()  # .most_common(100) would keep only the 100 most common words; with no argument every word is kept, ordered from most to least common
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return dictionary, reverse_dictionary
titles_train = new_titles[:floor(len(new_titles)*.75)]
titles_test = new_titles[floor(len(new_titles)*.75):]
descriptions_train = new_descriptions[:floor(len(new_descriptions)*.75)]
descriptions_test = new_descriptions[floor(len(new_descriptions)*.75):]
targets_train = targets[:floor(len(targets)*.75)]
targets_test = targets[floor(len(targets)*.75):]
title_word_list=[]
for i in np.arange(len(new_titles)):
title_word_list.append(new_titles[i].split())
title_flat_list = []
for sublist in title_word_list:
for item in sublist:
title_flat_list.append(item)
title_word_list = title_flat_list.copy()
from collections import Counter
title_word_to_id = Counter()
for word in title_word_list:
title_word_to_id[word] += 1
# For unique dictionary values of key words
title_word_to_id = {k:(i + 3) for i,(k,v) in enumerate(title_word_to_id.items())}
title_word_to_id["<PAD>"] = 0 # there is no value this replaces; it just adds a pad
title_word_to_id["<START>"] = 1 # BERT doesn't use START tokens so using spaces instead; spaces will be trimmed out
title_word_to_id["<UNK>"] = 2 # UNK tokens are good. BERT converts them to ## so it knows it's unknown
title_word_to_id["<UNUSED>"] = 3
title_id_to_word = {value:key for key, value in title_word_to_id.items()}
description_word_list=[]
for i in np.arange(len(new_descriptions)):
description_word_list.append(new_descriptions[i].split())
description_flat_list = []
for sublist in description_word_list:
for item in sublist:
description_flat_list.append(item)
description_word_list = description_flat_list.copy()
from collections import Counter
description_word_to_id = Counter()
for word in description_word_list:
description_word_to_id[word] += 1
# For unique dictionary values of key words
description_word_to_id = {k:(i + 3) for i,(k,v) in enumerate(description_word_to_id.items())}
description_word_to_id["<PAD>"] = 0 # there is no value this replaces; it just adds a pad
description_word_to_id["<START>"] = 1 # BERT doesn't use START tokens so using spaces instead; spaces will be trimmed out
description_word_to_id["<UNK>"] = 2 # UNK tokens are good. BERT converts them to ## so it knows it's unknown
description_word_to_id["<UNUSED>"] = 3
description_id_to_word = {value:key for key, value in description_word_to_id.items()}
##################
### BERT MODEL ###
##################
#max_seq_length = 128 # Your choice here.
max_seq_length = 512 # Your choice here.
input_word_ids = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32,
name="input_word_ids")
input_mask = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32,
name="input_mask")
segment_ids = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32,
name="segment_ids")
bert_layer = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2",
trainable=True)
pooled_output, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])
######################
### BERT TOKENIZER ###
######################
# Set up tokenizer to generate Tensorflow dataset
FullTokenizer = bert.bert_tokenization.FullTokenizer
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = FullTokenizer(vocab_file, do_lower_case)
train_titles_tokens = list(map(lambda titles_train: ['[CLS]'] + tokenizer.tokenize(titles_train)[:510] + ['[SEP]'], titles_train))
test_titles_tokens = list(map(lambda titles_test: ['[CLS]'] + tokenizer.tokenize(titles_test)[:510] + ['[SEP]'], titles_test))
description_train_tokens = []
for desc_train in descriptions_train:
desc_train = ['[CLS]'] + tokenizer.tokenize(desc_train)[:510] + ['[SEP]']
description_train_tokens.append(desc_train)
description_test_tokens = []
for desc_test in descriptions_test:
desc_test = ['[CLS]'] + tokenizer.tokenize(desc_test)[:510] + ['[SEP]']
description_test_tokens.append(desc_test)
def get_ids(tokens, tokenizer, max_seq_length):
"""Token ids from Tokenizer vocab"""
token_ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids = token_ids + [0] * (max_seq_length-len(token_ids))
return np.array(input_ids)
def get_masks(tokens, max_seq_length):
"""Mask for padding"""
if len(tokens)>max_seq_length:
raise IndexError("Token length more than max seq length!")
return np.array([1]*len(tokens) + [0] * (max_seq_length - len(tokens)))
def get_segments(tokens, max_seq_length):
"""Segments: 0 for the first sequence, 1 for the second"""
if len(tokens)>max_seq_length:
raise IndexError("Token length more than max seq length!")
segments = []
current_segment_id = 0
for token in tokens:
segments.append(current_segment_id)
if token == "[SEP]":
current_segment_id = 1
return np.array(segments + [0] * (max_seq_length - len(tokens)))
################## Create word ids for BERT
max_seq_length = 512
train_title_tokens_ids = []
test_title_tokens_ids = []
train_description_tokens_ids = []
test_description_tokens_ids = []
for i in np.arange(0, len(train_titles_tokens)):
this_train_title_token_id = get_ids(train_titles_tokens[i], tokenizer, max_seq_length=max_seq_length)
train_title_tokens_ids.append(this_train_title_token_id)
for i in np.arange(0, len(test_titles_tokens)):
this_test_title_token_id = get_ids(test_titles_tokens[i], tokenizer, max_seq_length=max_seq_length)
test_title_tokens_ids.append(this_test_title_token_id)
for i in np.arange(0, len(description_train_tokens)):
this_train_description_token_id = get_ids(description_train_tokens[i], tokenizer, max_seq_length=max_seq_length)
train_description_tokens_ids.append(this_train_description_token_id)
for i in np.arange(0, len(description_test_tokens)):
this_test_description_token_id = get_ids(description_test_tokens[i], tokenizer, max_seq_length=max_seq_length)
test_description_tokens_ids.append(this_test_description_token_id)
################## Create text masks for BERT
max_seq_length = 512
train_title_tokens_masks = []
test_title_tokens_masks = []
train_description_tokens_masks = []
test_description_tokens_masks = []
for i in np.arange(0, len(train_titles_tokens)):
this_train_title_token_mask = get_masks(train_titles_tokens[i], max_seq_length=max_seq_length)
train_title_tokens_masks.append(this_train_title_token_mask)
for i in np.arange(0, len(test_titles_tokens)):
this_test_title_token_mask = get_masks(test_titles_tokens[i], max_seq_length=max_seq_length)
test_title_tokens_masks.append(this_test_title_token_mask)
for i in np.arange(0, len(description_train_tokens)):
this_train_description_token_mask = get_masks(description_train_tokens[i], max_seq_length=max_seq_length)
train_description_tokens_masks.append(this_train_description_token_mask)
for i in np.arange(0, len(description_test_tokens)):
this_test_description_token_mask = get_masks(description_test_tokens[i], max_seq_length=max_seq_length)
test_description_tokens_masks.append(this_test_description_token_mask)
################## Create text segments for BERT
max_seq_length = 512
train_title_tokens_segs = []
test_title_tokens_segs = []
train_description_tokens_segs = []
test_description_tokens_segs = []
input_seg = []
for i in np.arange(0, len(train_titles_tokens)):
this_train_title_token_seg = get_segments(train_titles_tokens[i], max_seq_length=max_seq_length)
train_title_tokens_segs.append(this_train_title_token_seg)
for i in np.arange(0, len(test_titles_tokens)):
this_test_title_token_seg = get_segments(test_titles_tokens[i], max_seq_length=max_seq_length)
test_title_tokens_segs.append(this_test_title_token_seg)
for i in np.arange(0, len(description_train_tokens)):
this_train_description_token_seg = get_segments(description_train_tokens[i], max_seq_length=max_seq_length)
train_description_tokens_segs.append(this_train_description_token_seg)
for i in np.arange(0, len(description_test_tokens)):
this_test_description_token_seg = get_segments(description_test_tokens[i], max_seq_length=max_seq_length)
test_description_tokens_segs.append(this_test_description_token_seg)
# Prepping for generator
child_dict1 = dict(zip(ids, train_description_tokens_ids))
child_dict2 = dict(zip(ids, train_description_tokens_masks))
child_dict3 = dict(zip(ids, train_description_tokens_segs))
child_dict4 = dict(zip(ids, train_title_tokens_ids))
child_dict5 = dict(zip(ids, train_title_tokens_masks))
child_dict6 = dict(zip(ids, train_title_tokens_segs))
dict_subset = [child_dict1, child_dict2, child_dict3, child_dict4, child_dict5, child_dict6]
parent_dict = {}
for i in child_dict1.keys():
parent_dict[i] = tuple(child_dict[i] for child_dict in dict_subset)
with open('BERT_VAL_INPUTS_dict.pickle', 'wb') as handle:
pickle.dump(parent_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
#############################################################################################################
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%#
################ Here lies the second iteration of the code, where train gets E2E processing ################
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%#
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
#############################################################################################################
os.chdir('/users/paula/scratch/train')
x = "/users/paula/scratch/train"
subdirs = [os.path.join(x, o) for o in os.listdir(x) if os.path.isdir(os.path.join(x,o))]
new_dirs=[]
files = []
for dirs in subdirs:
# r=root, d=directories, f = files
for r, d, f in os.walk(dirs):
for file in f:
files.append(os.path.join(r, file))
new_dirs.append(os.path.split(dirs)[1])
df = pd.DataFrame(list(zip(files, new_dirs)), columns=['metadata_file','target'])
# To list all files and their full paths (without extensions):
xyz=pd.Series()
for i in np.arange(len(df)):
df['metadata_file'][i] = df['metadata_file'][i].split(".")[0]
df = df.drop_duplicates().copy()
filelist = list(df.metadata_file)
classlist = list(df.target)
descriptions=[]
ids=[]
targets=[]
pathz=[]
new_dirs=[]
titles=[]
for file in filelist:
with open(file) as f:
json_data = json.load(f)
descriptions.append(json_data['description'])
ids.append(json_data['id'])
titles.append(json_data['title'])
new_titles = []
new_descriptions = []
# encode as ascii, decode to convert back to characters from bytes. Also, do regex cleanup on special characters not indicative of meaning
for title in titles:
new_titles.append(re.sub(r"[^a-zA-Z?.!,¿#@]+", " ",title.encode('ascii',errors='ignore').decode('UTF-8')))
for description in descriptions:
new_descriptions.append(re.sub(r"[^a-zA-Z?.!,¿#@]+", " ",description.encode('ascii',errors='ignore').decode('UTF-8')))
for i in range(len(new_titles)):
new_titles[i] = new_titles[i].lower()
for i in range(len(new_descriptions)):
new_descriptions[i] = new_descriptions[i].lower()
for i in range(len(new_titles)):
new_titles[i] = new_titles[i].lower()
for i in range(len(new_descriptions)):
new_descriptions[i] = new_descriptions[i].lower()
targets =
|
pd.Series(df['target'])
|
pandas.Series
|
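The completion above wraps an existing column in `pd.Series`; a tiny self-contained sketch (invented frame) of the related `reset_index(drop=True)` step used earlier in the script to get a clean 0..n-1 index:
import pandas as pd

df = pd.DataFrame({'metadata_file': ['a.json', 'b.json', 'c.json'],
                   'target': ['cls1', 'cls2', 'cls1']},
                  index=[5, 7, 9])
targets = pd.Series(df['target']).reset_index(drop=True)
print(targets)   # labelled 0, 1, 2 -- the original index labels are dropped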
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
jobs =
|
pd.read_csv('data/stackoverflow_jobs_enhanced.csv', thousands=',')
|
pandas.read_csv
|
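A self-contained sketch of the `thousands=','` option used in the completion above; an in-memory CSV stands in for the real jobs file, which is not reproduced here.
import io
import pandas as pd

csv_text = 'title,salary\nData Engineer,"120,000"\nAnalyst,"90,500"\n'
jobs = pd.read_csv(io.StringIO(csv_text), thousands=',')
print(jobs.dtypes)   # salary is parsed as int64, not as a string containing commas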
# coding: utf-8
"""基于HDF文件的数据库"""
import pandas as pd
import numpy as np
import os
import warnings
from multiprocessing import Lock
from ..utils.datetime_func import Datetime2DateStr, DateStr2Datetime
from ..utils.tool_funcs import ensure_dir_exists
from ..utils.disk_persist_provider import DiskPersistProvider
from .helpers import handle_ids, FIFODict
from pathlib import Path
from FactorLib.utils.tool_funcs import is_non_string_iterable
pd.options.compute.use_numexpr = True
lock = Lock()
warnings.simplefilter('ignore', category=FutureWarning)
def append_along_index(df1, df2):
df1, df2 = df1.align(df2, axis='columns')
new = pd.DataFrame(np.vstack((df1.values, df2.values)),
columns=df1.columns,
index=df1.index.append(df2.index))
new.sort_index(inplace=True)
return new
def auto_increase_keys(_dict, keys):
if _dict:
max_v = max(_dict.values())
else:
max_v = 0
for key in keys:
if key not in _dict:
max_v += 1
_dict[key] = max_v
return _dict
class H5DB(object):
def __init__(self, data_path, max_cached_files=30):
self.data_path = str(data_path)
self.feather_data_path = os.path.abspath(self.data_path+'/../feather')
self.csv_data_path = os.path.abspath(self.data_path+'/../csv')
self.data_dict = None
self.cached_data = FIFODict(max_cached_files)
self.max_cached_files = max_cached_files
# self._update_info()
def _update_info(self):
factor_list = []
for root, subdirs, files in os.walk(self.data_path):
relpath = "/%s/"%os.path.relpath(root, self.data_path).replace("\\", "/")
for file in files:
if file.endswith(".h5"):
factor_list.append([relpath, file[:-3]])
self.data_dict = pd.DataFrame(
factor_list, columns=['path', 'name'])
def _read_h5file(self, file_path, key):
if file_path in self.cached_data:
return self.cached_data[file_path]
lock.acquire()
try:
data = pd.read_hdf(file_path, key)
except KeyError:
data = pd.read_hdf(file_path, 'data')
finally:
lock.release()
# update at 2020.02.15: support wide dataframes
columns_mapping = self._read_columns_mapping(file_path)
if not columns_mapping.empty:
data.rename(
columns=pd.Series(columns_mapping.index, index=columns_mapping.to_numpy()),
inplace=True
)
if self.max_cached_files > 0:
self.cached_data[file_path] = data
return data
def _read_columns_mapping(self, file_path):
try:
data = pd.read_hdf(file_path, 'column_name_mapping')
except KeyError:
data = pd.Series()
return data
def _normalize_columns(self, input, column_mapping):
return column_mapping[column_mapping.index.isin(input)].tolist()
def _save_h5file(self, data, file_path, key,
complib='blosc', complevel=9,
mode='w', **kwargs):
try:
lock.acquire()
# update at 2020.02.15: support wide dataframes
if data.shape[1] > 1000:
columns_mapping = {x:y for x, y in zip(data.columns, range(data.shape[1]))}
data2 = data.rename(columns=columns_mapping)
else:
data2 = data
columns_mapping = {}
with pd.HDFStore(file_path, mode=mode, complevel=complevel,
complib=complib) as f:
f.put(key, data2, **kwargs)
f.put('column_name_mapping', pd.Series(columns_mapping))
if file_path in self.cached_data:
self.cached_data.update({file_path: data})
lock.release()
except Exception as e:
lock.release()
raise e
def _read_pklfile(self, file_path):
if file_path in self.cached_data:
return self.cached_data[file_path]
lock.acquire()
try:
d = pd.read_pickle(file_path)
if self.max_cached_files > 0:
self.cached_data[file_path] = d
lock.release()
except Exception as e:
lock.release()
raise e
return d
def _save_pklfile(self, data, file_dir, name, protocol=-1):
dumper = DiskPersistProvider(
os.path.join(self.data_path, file_dir.strip('/')))
file_path = os.path.join(
self.data_path, file_dir.strip('/'), name+'.pkl'
)
lock.acquire()
try:
dumper.dump(data, name, protocol)
if file_path in self.cached_data:
self.cached_data[file_path] = data
except Exception as e:
lock.release()
raise e
lock.release()
def _delete_cached_factor(self, file_path):
if file_path in self.cached_data:
del self.cached_data[file_path]
def set_data_path(self, path):
self.data_path = path
# self._update_info()
# --------------------------- Factor management ---------------------------------------
# Check whether a factor exists
def check_factor_exists(self, factor_name, factor_dir='/'):
file_path = self.abs_factor_path(factor_dir, factor_name)
return os.path.isfile(file_path)
# Delete a factor
def delete_factor(self, factor_name, factor_dir='/'):
factor_path = self.abs_factor_path(factor_dir, factor_name)
try:
os.remove(factor_path)
self._delete_cached_factor(factor_path)
except Exception as e:
print(e)
pass
self._update_info()
# List factor names
def list_factors(self, factor_dir):
dir_path = self.data_path + factor_dir
factors = [x[:-3] for x in os.listdir(dir_path) if x.endswith('.h5')]
return factors
# Rename a factor
def rename_factor(self, old_name, new_name, factor_dir):
factor_path = self.abs_factor_path(factor_dir, old_name)
temp_factor_path = self.abs_factor_path(factor_dir, new_name)
factor_data = self._read_h5file(factor_path, old_name).rename(columns={old_name: new_name})
self._save_h5file(factor_data, temp_factor_path, new_name)
self.delete_factor(old_name, factor_dir)
# Create a factor directory
def create_factor_dir(self, factor_dir):
if not os.path.isdir(self.data_path+factor_dir):
os.makedirs(self.data_path+factor_dir)
# Date range covered by a factor
def get_date_range(self, factor_name, factor_path):
try:
max_date = self.read_h5file_attr(factor_name, factor_path, 'max_date')
min_date = self.read_h5file_attr(factor_name, factor_path, 'min_date')
except Exception:
try:
panel = self._read_h5file(
self.abs_factor_path(factor_path, factor_name), key='data')
except KeyError:
panel = self._read_h5file(
self.abs_factor_path(factor_path, factor_name), key=factor_name)
if isinstance(panel, pd.Panel):
min_date = Datetime2DateStr(panel.major_axis.min())
max_date = Datetime2DateStr(panel.major_axis.max())
else:
min_date = panel.index.get_level_values('date').min()
max_date = panel.index.get_level_values('date').max()
return min_date, max_date
# Read the attributes of a multi-column factor
def read_h5file_attr(self, factor_name, factor_path):
attr_file_path = self.abs_factor_attr_path(factor_path, factor_name)
print(attr_file_path)
if os.path.isfile(attr_file_path):
return self._read_pklfile(attr_file_path)
else:
raise FileNotFoundError('Factor attribute file not found!')
def clear_cache(self):
self.cached_data = FIFODict(self.max_cached_files)
# -------------------------- Data management -------------------------------------------
@handle_ids
def load_factor(self, factor_name, factor_dir=None, dates=None, ids=None, idx=None,
date_level=0):
"""
加载一个因子
因子格式
-------
因子的存储格式是DataFrame(index=[date,IDs], columns=factor)
Parameters:
-----------
factor_name: str
因子名称
factor_dir: str
因子路径
dates: list
日期
ids: list
代码
idx: DataFrame or Series
索引
date_level: int
日期索引在多层次索引中的位置
"""
if idx is not None:
dates = idx.index.get_level_values('date').unique()
return (self
.load_factor(factor_name, factor_dir=factor_dir, dates=dates)
.reindex(idx.index, copy=False)
)
factor_path = self.abs_factor_path(factor_dir, factor_name)
data = self._read_h5file(factor_path, factor_name)
query_str = ""
if ids is not None:
if isinstance(ids, list):
query_str += "IDs in @ids"
else:
query_str += "IDs == @ids"
if len(query_str) > 0:
query_str += " and "
if dates is not None:
if is_non_string_iterable(dates):
query_str += "date in @dates"
else:
query_str += "date == @dates"
if query_str.endswith(" and "):
query_str = query_str.strip(" and ")
if query_str:
df = data.query(query_str)
return df
else:
return data
def load_factor2(self, factor_name, factor_dir=None, dates=None, ids=None, idx=None,
stack=False, check_A=False):
"""加载另外一种类型的因子
因子的格式是一个二维DataFrame,行索引是DatetimeIndex,列索引是股票代码。
check_A: 过滤掉非A股股票
"""
if idx is not None:
dates = idx.index.get_level_values('date').unique().tolist()
ids = idx.index.get_level_values('IDs').unique().tolist()
factor_path = self.abs_factor_path(factor_dir, factor_name)
columns_mapping = self._read_columns_mapping(factor_path)
if not columns_mapping.empty and ids is not None:
ids_normalized = self._normalize_columns(ids, columns_mapping)
if not ids_normalized:
return pd.DataFrame(columns=ids)
else:
ids_normalized = ids
where_term = None
if dates is not None:
dates = pd.to_datetime(dates)
where_term = "index in dates"
with
|
pd.HDFStore(factor_path, mode='r')
|
pandas.HDFStore
|
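A minimal sketch of querying an HDF5 file through `pd.HDFStore` with a `where` clause on read, which is what the class above builds towards; this requires writing in table format and the optional PyTables dependency, and the file name and data here are invented.
import pandas as pd

df = pd.DataFrame({'x': range(4)}, index=pd.date_range('2020-01-01', periods=4))
with pd.HDFStore('demo.h5', mode='w') as store:   # needs the 'tables' package installed
    store.put('data', df, format='table')         # table format allows where-queries
with pd.HDFStore('demo.h5', mode='r') as store:
    subset = store.select('data', where="index >= pd.Timestamp('2020-01-02') & index <= pd.Timestamp('2020-01-03')")
print(subset)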
import copy
import datetime as dt
import logging
import os
import re
import warnings
from datetime import datetime
from unittest.mock import patch
import cftime
import numpy as np
import pandas as pd
import pytest
from numpy import testing as npt
from packaging.version import parse
from pandas.errors import UnsupportedFunctionCall
from pint.errors import DimensionalityError, UndefinedUnitError
from scmdata.errors import (
DuplicateTimesError,
MissingRequiredColumnError,
NonUniqueMetadataError,
)
from scmdata.run import BaseScmRun, ScmRun, run_append
from scmdata.testing import (
_check_pandas_less_110,
_check_pandas_less_120,
assert_scmdf_almost_equal,
)
@pytest.fixture
def scm_run_interpolated(scm_run):
return scm_run.interpolate(
[
dt.datetime(y, 1, 1)
for y in range(scm_run["year"].min(), scm_run["year"].max() + 1)
]
)
def test_init_df_year_converted_to_datetime(test_pd_df):
res = ScmRun(test_pd_df)
assert (res["year"].unique() == [2005, 2010, 2015]).all()
assert (
res["time"].unique()
== [dt.datetime(2005, 1, 1), dt.datetime(2010, 1, 1), dt.datetime(2015, 1, 1)]
).all()
@pytest.mark.parametrize(
"in_format",
[
"pd.Series",
"year_col",
"year_col_index",
"time_col",
"time_col_index",
"time_col_str_simple",
"time_col_str_complex",
"time_col_reversed",
"str_times",
],
)
def test_init_df_formats(test_pd_run_df, in_format):
if in_format == "pd.Series":
idx = ["climate_model", "model", "scenario", "region", "variable", "unit"]
test_init = test_pd_run_df.melt(id_vars=idx, var_name="year").set_index(
idx + ["year"]
)["value"]
elif in_format == "year_col":
idx = ["climate_model", "model", "scenario", "region", "variable", "unit"]
test_init = test_pd_run_df.melt(id_vars=idx, var_name="year")
elif in_format == "year_col_index":
idx = ["climate_model", "model", "scenario", "region", "variable", "unit"]
test_init = test_pd_run_df.melt(id_vars=idx, var_name="year").set_index(
idx + ["year"]
)
elif in_format == "time_col":
idx = ["climate_model", "model", "scenario", "region", "variable", "unit"]
test_init = test_pd_run_df.melt(id_vars=idx, var_name="year")
test_init["time"] = test_init["year"].apply(lambda x: dt.datetime(x, 1, 1))
test_init = test_init.drop("year", axis="columns")
elif in_format == "time_col_index":
idx = ["climate_model", "model", "scenario", "region", "variable", "unit"]
test_init = test_pd_run_df.melt(id_vars=idx, var_name="year")
test_init["time"] = test_init["year"].apply(lambda x: dt.datetime(x, 1, 1))
test_init = test_init.drop("year", axis="columns")
test_init = test_init.set_index(idx + ["time"])
elif in_format == "time_col_str_simple":
idx = ["climate_model", "model", "scenario", "region", "variable", "unit"]
test_init = test_pd_run_df.melt(id_vars=idx, var_name="year")
test_init["time"] = test_init["year"].apply(
lambda x: "{}-1-1 00:00:00".format(x)
)
test_init = test_init.drop("year", axis="columns")
elif in_format == "time_col_str_complex":
idx = ["climate_model", "model", "scenario", "region", "variable", "unit"]
test_init = test_pd_run_df.melt(id_vars=idx, var_name="year")
test_init["time"] = test_init["year"].apply(lambda x: "{}/1/1".format(x))
test_init = test_init.drop("year", axis="columns")
elif in_format == "time_col_reversed":
test_init = test_pd_run_df[test_pd_run_df.columns[::-1]]
elif in_format == "str_times":
test_init = test_pd_run_df.copy()
test_init.columns = test_init.columns.map(
lambda x: "{}/1/1".format(x) if isinstance(x, int) else x
)
res = ScmRun(test_init)
assert (res["year"].unique() == [2005, 2010, 2015]).all()
assert (
res["time"].unique()
== [dt.datetime(2005, 1, 1), dt.datetime(2010, 1, 1), dt.datetime(2015, 1, 1)]
).all()
assert "Start: 2005" in res.__repr__()
assert "End: 2015" in res.__repr__()
res_df = res.timeseries()
res_df.columns = res_df.columns.map(lambda x: x.year)
res_df = res_df.reset_index()
pd.testing.assert_frame_equal(
res_df[test_pd_run_df.columns.tolist()], test_pd_run_df, check_like=True,
)
def test_init_df_missing_time_axis_error(test_pd_df):
idx = ["climate_model", "model", "scenario", "region", "variable", "unit"]
test_init = test_pd_df.melt(id_vars=idx, var_name="year")
test_init = test_init.drop("year", axis="columns")
error_msg = re.escape("invalid time format, must have either `year` or `time`!")
with pytest.raises(ValueError, match=error_msg):
ScmRun(test_init)
def test_init_df_missing_time_columns_error(test_pd_df):
test_init = test_pd_df.copy()
test_init = test_init.drop(
test_init.columns[test_init.columns.map(lambda x: isinstance(x, int))],
axis="columns",
)
error_msg = re.escape(
"invalid column format, must contain some time (int, float or datetime) "
"columns!"
)
with pytest.raises(ValueError, match=error_msg):
ScmRun(test_init)
def test_init_df_missing_col_error(test_pd_df):
test_pd_df = test_pd_df.drop("model", axis="columns")
error_msg = re.escape("Missing required columns `['model']`!")
with pytest.raises(MissingRequiredColumnError, match=error_msg):
ScmRun(test_pd_df)
def test_init_ts_missing_col_error(test_ts):
error_msg = re.escape("Missing required columns `['model']`!")
with pytest.raises(MissingRequiredColumnError, match=error_msg):
ScmRun(
test_ts,
columns={
"climate_model": ["a_model"],
"scenario": ["a_scenario", "a_scenario", "a_scenario2"],
"region": ["World"],
"variable": ["Primary Energy", "Primary Energy|Coal", "Primary Energy"],
"unit": ["EJ/yr"],
},
index=[2005, 2010, 2015],
)
def test_init_required_cols(test_pd_df):
class MyRun(BaseScmRun):
required_cols = ("climate_model", "variable", "unit")
del test_pd_df["model"]
assert all([c in test_pd_df.columns for c in MyRun.required_cols])
MyRun(test_pd_df)
del test_pd_df["climate_model"]
assert not all([c in test_pd_df.columns for c in MyRun.required_cols])
error_msg = re.escape("Missing required columns `['climate_model']`!")
with pytest.raises(
MissingRequiredColumnError, match=error_msg,
):
MyRun(test_pd_df)
def test_init_multiple_file_error():
error_msg = re.escape(
"Initialising from multiple files not supported, use "
"`scmdata.run.ScmRun.append()`"
)
with pytest.raises(ValueError, match=error_msg):
ScmRun(["file_1", "filepath_2"])
def test_init_unrecognised_type_error():
fail_type = {"dict": "key"}
error_msg = re.escape("Cannot load {} from {}".format(str(ScmRun), type(fail_type)))
with pytest.raises(TypeError, match=error_msg):
ScmRun(fail_type)
def test_init_ts_col_string(test_ts):
res = ScmRun(
test_ts,
columns={
"model": "an_iam",
"climate_model": "a_model",
"scenario": ["a_scenario", "a_scenario", "a_scenario2"],
"region": "World",
"variable": ["Primary Energy", "Primary Energy|Coal", "Primary Energy"],
"unit": "EJ/yr",
},
index=[2005, 2010, 2015],
)
npt.assert_array_equal(res["model"].unique(), "an_iam")
npt.assert_array_equal(res["climate_model"].unique(), "a_model")
npt.assert_array_equal(res["region"].unique(), "World")
npt.assert_array_equal(res["unit"].unique(), "EJ/yr")
@pytest.mark.parametrize("fail_setting", [["a_iam", "a_iam"]])
def test_init_ts_col_wrong_length_error(test_ts, fail_setting):
correct_scenarios = ["a_scenario", "a_scenario", "a_scenario2"]
error_msg = re.escape(
"Length of column 'model' is incorrect. It should be length 1 or {}".format(
len(correct_scenarios)
)
)
with pytest.raises(ValueError, match=error_msg):
ScmRun(
test_ts,
columns={
"model": fail_setting,
"climate_model": ["a_model"],
"scenario": correct_scenarios,
"region": ["World"],
"variable": ["Primary Energy", "Primary Energy|Coal", "Primary Energy"],
"unit": ["EJ/yr"],
},
index=[2005, 2010, 2015],
)
def get_test_pd_df_with_datetime_columns(tpdf):
return tpdf.rename(
{
2005.0: dt.datetime(2005, 1, 1),
2010.0: dt.datetime(2010, 1, 1),
2015.0: dt.datetime(2015, 1, 1),
},
axis="columns",
)
def test_init_with_ts(test_ts, test_pd_df):
df = ScmRun(
test_ts,
columns={
"model": ["a_iam"],
"climate_model": ["a_model"],
"scenario": ["a_scenario", "a_scenario", "a_scenario2"],
"region": ["World"],
"variable": ["Primary Energy", "Primary Energy|Coal", "Primary Energy"],
"unit": ["EJ/yr"],
},
index=[2005, 2010, 2015],
)
tdf = get_test_pd_df_with_datetime_columns(test_pd_df)
pd.testing.assert_frame_equal(df.timeseries().reset_index(), tdf, check_like=True)
b = ScmRun(test_pd_df)
assert_scmdf_almost_equal(df, b, check_ts_names=False)
def test_init_with_scmdf(test_scm_run_datetimes, test_scm_datetime_run):
df = ScmRun(test_scm_run_datetimes,)
assert_scmdf_almost_equal(df, test_scm_datetime_run, check_ts_names=False)
@pytest.mark.parametrize(
"years", [["2005.0", "2010.0", "2015.0"], ["2005", "2010", "2015"]]
)
def test_init_with_years_as_str(test_pd_df, years):
df = copy.deepcopy(
test_pd_df
) # This needs to be a deep copy so it doesn't break the other tests
cols = copy.deepcopy(test_pd_df.columns.values)
cols[-3:] = years
df.columns = cols
df = ScmRun(df)
obs = df.time_points.values
exp = np.array(
[dt.datetime(2005, 1, 1), dt.datetime(2010, 1, 1), dt.datetime(2015, 1, 1)],
dtype="datetime64[s]",
)
assert (obs == exp).all()
def test_init_with_year_columns(test_pd_df):
df = ScmRun(test_pd_df)
tdf = get_test_pd_df_with_datetime_columns(test_pd_df)
pd.testing.assert_frame_equal(df.timeseries().reset_index(), tdf, check_like=True)
def test_init_with_decimal_years():
inp_array = [2.0, 1.2, 7.9]
d = pd.Series(inp_array, index=[1765.0, 1765.083, 1765.167])
cols = {
"model": ["a_model"],
"scenario": ["a_scenario"],
"region": ["World"],
"variable": ["Primary Energy"],
"unit": ["EJ/yr"],
}
res = ScmRun(d, columns=cols)
assert (
res["time"].unique()
== [
dt.datetime(1765, 1, 1, 0, 0),
dt.datetime(1765, 1, 31, 7, 4, 48),
dt.datetime(1765, 3, 2, 22, 55, 11),
]
).all()
npt.assert_array_equal(res.values[0], inp_array)
def test_init_df_from_timeseries(test_scm_df_mulitple):
df = ScmRun(test_scm_df_mulitple.timeseries())
assert_scmdf_almost_equal(df, test_scm_df_mulitple, check_ts_names=False)
def test_init_df_with_extra_col(test_pd_df):
tdf = test_pd_df.copy()
extra_col = "test value"
extra_value = "scm_model"
tdf[extra_col] = extra_value
df = ScmRun(tdf)
tdf = get_test_pd_df_with_datetime_columns(tdf)
assert extra_col in df.meta
pd.testing.assert_frame_equal(df.timeseries().reset_index(), tdf, check_like=True)
def test_init_df_without_required_arguments(test_run_ts):
with pytest.raises(ValueError, match="`columns` argument is required"):
ScmRun(test_run_ts, index=[2000, 2005, 2010], columns=None)
with pytest.raises(ValueError, match="`index` argument is required"):
ScmRun(test_run_ts, index=None, columns={"variable": "test"})
def test_init_iam(test_iam_df, test_pd_df):
a = ScmRun(test_iam_df)
b = ScmRun(test_pd_df)
assert_scmdf_almost_equal(a, b, check_ts_names=False)
def test_init_self(test_iam_df):
a = ScmRun(test_iam_df)
b = ScmRun(a)
assert_scmdf_almost_equal(a, b)
def test_init_with_metadata(scm_run):
expected_metadata = {"test": "example"}
b = ScmRun(scm_run.timeseries(), metadata=expected_metadata)
# Data should be copied
assert id(b.metadata) != id(expected_metadata)
assert b.metadata == expected_metadata
def test_init_self_with_metadata(scm_run):
scm_run.metadata["test"] = "example"
b = ScmRun(scm_run)
assert id(scm_run.metadata) != id(b.metadata)
assert scm_run.metadata == b.metadata
c = ScmRun(scm_run, metadata={"test": "other"})
assert c.metadata == {"test": "other"}
def _check_copy(a, b, copy_data):
if copy_data:
assert id(a.values.base) != id(b.values.base)
else:
assert id(a.values.base) == id(b.values.base)
@pytest.mark.parametrize("copy_data", [True, False])
def test_init_with_copy_run(copy_data, scm_run):
res = ScmRun(scm_run, copy_data=copy_data)
assert id(res) != id(scm_run)
_check_copy(res._df, scm_run._df, copy_data)
@pytest.mark.parametrize("copy_data", [True, False])
def test_init_with_copy_dataframe(copy_data, test_pd_df):
res = ScmRun(test_pd_df, copy_data=copy_data)
# an incoming pandas DF no longer references the original
_check_copy(res._df, test_pd_df, True)
def test_init_duplicate_columns(test_pd_df):
exp_msg = (
"Duplicate times (numbers show how many times the given " "time is repeated)"
)
inp = pd.concat([test_pd_df, test_pd_df[2015]], axis=1)
with pytest.raises(DuplicateTimesError) as exc_info:
ScmRun(inp)
error_msg = exc_info.value.args[0]
assert error_msg.startswith(exp_msg)
pd.testing.assert_index_equal(
pd.Index([2005, 2010, 2015, 2015], dtype="object", name="time"),
exc_info.value.time_index,
)
def test_init_empty(scm_run):
empty_run = ScmRun()
assert empty_run.empty
assert empty_run.filter(model="*").empty
empty_run.append(scm_run, inplace=True)
assert not empty_run.empty
def test_repr_empty():
empty_run = ScmRun()
assert str(empty_run) == empty_run.__repr__()
repr = str(empty_run)
assert "Start: N/A" in repr
assert "End: N/A" in repr
assert "timeseries: 0, timepoints: 0" in repr
def test_as_iam(test_iam_df, test_pd_df, iamdf_type):
df = ScmRun(test_pd_df).to_iamdataframe()
# test is skipped by test_iam_df fixture if pyam isn't installed
assert isinstance(df, iamdf_type)
pd.testing.assert_frame_equal(test_iam_df.meta, df.meta)
# we switch to time so ensure sensible comparison of columns
tdf = df.data.copy()
tdf["year"] = tdf["time"].apply(lambda x: x.year)
tdf.drop("time", axis="columns", inplace=True)
pd.testing.assert_frame_equal(test_iam_df.data, tdf, check_like=True)
def test_get_item(scm_run):
assert scm_run["model"].unique() == ["a_iam"]
@pytest.mark.parametrize(
"value,output",
(
(1, [np.nan, np.nan, 1.0]),
(1.0, (np.nan, np.nan, 1.0)),
("test", ["nan", "nan", "test"]),
),
)
def test_get_item_with_nans(scm_run, value, output):
expected_values = [np.nan, np.nan, value]
scm_run["extra"] = expected_values
exp = pd.Series(output, name="extra")
pd.testing.assert_series_equal(scm_run["extra"], exp, check_exact=value != "test")
def test_get_item_not_in_meta(scm_run):
dud_key = 0
error_msg = re.escape("[{}] is not in metadata".format(dud_key))
with pytest.raises(KeyError, match=error_msg):
scm_run[dud_key]
def test_set_item(scm_run):
scm_run["model"] = ["a_iam", "b_iam", "c_iam"]
assert all(scm_run["model"] == ["a_iam", "b_iam", "c_iam"])
def test_set_item_not_in_meta(scm_run):
with pytest.raises(ValueError):
scm_run["junk"] = ["hi", "bye"]
scm_run["junk"] = ["hi", "bye", "ciao"]
assert all(scm_run["junk"] == ["hi", "bye", "ciao"])
def test_len(scm_run):
assert len(scm_run) == len(scm_run.timeseries())
def test_shape(scm_run):
assert scm_run.shape == scm_run.timeseries().shape
def test_head(scm_run):
pd.testing.assert_frame_equal(scm_run.head(2), scm_run.timeseries().head(2))
def test_tail(scm_run):
pd.testing.assert_frame_equal(scm_run.tail(1), scm_run.timeseries().tail(1))
def test_values(scm_run):
# implicitly checks that `.values` returns the data with each row being a
# timeseries and each column being a timepoint
npt.assert_array_equal(scm_run.values, scm_run.timeseries().values)
def test_variable_depth_0(scm_run):
obs = list(scm_run.filter(level=0)["variable"].unique())
exp = ["Primary Energy"]
assert obs == exp
def test_variable_depth_0_with_base():
tdf = ScmRun(
data=np.array([[1, 6.0, 7], [0.5, 3, 2], [2, 7, 0], [-1, -2, 3]]).T,
columns={
"model": ["a_iam"],
"climate_model": ["a_model"],
"scenario": ["a_scenario"],
"region": ["World"],
"variable": [
"Primary Energy",
"Primary Energy|Coal",
"Primary Energy|Coal|Electricity",
"Primary Energy|Gas|Heating",
],
"unit": ["EJ/yr"],
},
index=[
dt.datetime(2005, 1, 1),
dt.datetime(2010, 1, 1),
dt.datetime(2015, 6, 12),
],
)
obs = list(tdf.filter(variable="Primary Energy|*", level=1)["variable"].unique())
exp = ["Primary Energy|Coal|Electricity", "Primary Energy|Gas|Heating"]
assert all([e in obs for e in exp]) and len(obs) == len(exp)
def test_variable_depth_0_keep_false(scm_run):
obs = list(scm_run.filter(level=0, keep=False)["variable"].unique())
exp = ["Primary Energy|Coal"]
assert obs == exp
def test_variable_depth_0_minus(scm_run):
obs = list(scm_run.filter(level="0-")["variable"].unique())
exp = ["Primary Energy"]
assert obs == exp
def test_variable_depth_0_plus(scm_run):
obs = list(scm_run.filter(level="0+")["variable"].unique())
exp = ["Primary Energy", "Primary Energy|Coal"]
assert obs == exp
def test_variable_depth_1(scm_run):
obs = list(scm_run.filter(level=1)["variable"].unique())
exp = ["Primary Energy|Coal"]
assert obs == exp
def test_variable_depth_1_minus(scm_run):
obs = list(scm_run.filter(level="1-")["variable"].unique())
exp = ["Primary Energy", "Primary Energy|Coal"]
assert obs == exp
def test_variable_depth_1_plus(scm_run):
obs = list(scm_run.filter(level="1+")["variable"].unique())
exp = ["Primary Energy|Coal"]
assert obs == exp
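# A reading of the `level` filters exercised in these tests (inferred from the
# expected values, not from the scmdata docs): an integer level=N keeps
# variables whose depth, i.e. number of "|" separators, is exactly N, while the
# string forms "N-" and "N+" keep depths of at most / at least N. When combined
# with a `variable` pattern (see test_variable_depth_0_with_base), the depth is
# counted relative to that pattern.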
def test_variable_depth_raises(scm_run):
pytest.raises(ValueError, scm_run.filter, level="1/")
def test_filter_error(scm_run):
pytest.raises(ValueError, scm_run.filter, foo="foo")
def test_filter_year(test_scm_run_datetimes):
obs = test_scm_run_datetimes.filter(year=2005)
expected = dt.datetime(2005, 6, 17, 12)
unique_time = obs["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected
def test_filter_year_error(test_scm_run_datetimes):
error_msg = re.escape("`year` can only be filtered with ints or lists of ints")
with pytest.raises(TypeError, match=error_msg):
test_scm_run_datetimes.filter(year=2005.0)
def test_filter_year_with_own_year(test_scm_run_datetimes):
res = test_scm_run_datetimes.filter(year=test_scm_run_datetimes["year"].values)
assert (res["year"].unique() == test_scm_run_datetimes["year"].unique()).all()
@pytest.mark.parametrize(
"year_list", ([2005, 2010], (2005, 2010), np.array([2005, 2010]).astype(int),)
)
def test_filter_year_list(year_list, test_scm_run_datetimes):
res = test_scm_run_datetimes.filter(year=year_list)
expected = [2005, 2010]
assert (res["year"].unique() == expected).all()
def test_filter_inplace(test_scm_run_datetimes):
test_scm_run_datetimes.filter(year=2005, inplace=True)
expected = dt.datetime(2005, 6, 17, 12)
unique_time = test_scm_run_datetimes["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected
@pytest.mark.parametrize("test_month", [6, "June", "Jun", "jun", ["Jun", "jun"]])
def test_filter_month(test_scm_run_datetimes, test_month):
obs = test_scm_run_datetimes.filter(month=test_month)
expected = dt.datetime(2005, 6, 17, 12)
unique_time = obs["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected
@pytest.mark.parametrize("test_month", [6, "Jun", "jun", ["Jun", "jun"]])
def test_filter_year_month(test_scm_run_datetimes, test_month):
obs = test_scm_run_datetimes.filter(year=2005, month=test_month)
expected = dt.datetime(2005, 6, 17, 12)
unique_time = obs["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected
@pytest.mark.parametrize("test_day", [17, "Fri", "Friday", "friday", ["Fri", "fri"]])
def test_filter_day(test_scm_run_datetimes, test_day):
obs = test_scm_run_datetimes.filter(day=test_day)
expected = dt.datetime(2005, 6, 17, 12)
unique_time = obs["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected
@pytest.mark.parametrize("test_hour", [12, [12, 13]])
def test_filter_hour(test_scm_run_datetimes, test_hour):
obs = test_scm_run_datetimes.filter(hour=test_hour)
test_hour = [test_hour] if isinstance(test_hour, int) else test_hour
expected_rows = (
test_scm_run_datetimes["time"].apply(lambda x: x.hour).isin(test_hour)
)
expected = test_scm_run_datetimes["time"].loc[expected_rows].unique()
unique_time = obs["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected[0]
def test_filter_hour_multiple(test_scm_run_datetimes):
obs = test_scm_run_datetimes.filter(hour=0)
expected_rows = test_scm_run_datetimes["time"].apply(lambda x: x.hour).isin([0])
expected = test_scm_run_datetimes["time"].loc[expected_rows].unique()
unique_time = obs["time"].unique()
assert len(unique_time) == 2
assert all([dt in unique_time for dt in expected])
def test_filter_time_exact_match(test_scm_run_datetimes):
obs = test_scm_run_datetimes.filter(time=dt.datetime(2005, 6, 17, 12))
expected = dt.datetime(2005, 6, 17, 12)
unique_time = obs["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected
def test_filter_time_range(test_scm_run_datetimes):
error_msg = r".*datetime.datetime.*"
with pytest.raises(TypeError, match=error_msg):
test_scm_run_datetimes.filter(
year=range(dt.datetime(2000, 6, 17), dt.datetime(2009, 6, 17))
)
def test_filter_time_range_year(test_scm_run_datetimes):
obs = test_scm_run_datetimes.filter(year=range(2000, 2008))
unique_time = obs["time"].unique()
expected = dt.datetime(2005, 6, 17, 12)
assert len(unique_time) == 1
assert unique_time[0] == expected
@pytest.mark.parametrize("month_range", [range(3, 7), "Mar-Jun"])
def test_filter_time_range_month(test_scm_run_datetimes, month_range):
obs = test_scm_run_datetimes.filter(month=month_range)
expected = dt.datetime(2005, 6, 17, 12)
unique_time = obs["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected
def test_filter_time_range_month_unrecognised_error(test_scm_run_datetimes):
fail_filter = "Marb-Jun"
error_msg = re.escape(
"Could not convert month '{}' to integer".format(
[m for m in fail_filter.split("-")]
)
)
with pytest.raises(ValueError, match=error_msg):
test_scm_run_datetimes.filter(month=fail_filter)
@pytest.mark.parametrize("month_range", [["Mar-Jun", "Nov-Feb"]])
def test_filter_time_range_round_the_clock_error(test_scm_run_datetimes, month_range):
error_msg = re.escape(
"string ranges must lead to increasing integer ranges, "
"Nov-Feb becomes [11, 2]"
)
with pytest.raises(ValueError, match=error_msg):
test_scm_run_datetimes.filter(month=month_range)
@pytest.mark.parametrize("day_range", [range(14, 20), "Thu-Sat"])
def test_filter_time_range_day(test_scm_run_datetimes, day_range):
obs = test_scm_run_datetimes.filter(day=day_range)
expected = dt.datetime(2005, 6, 17, 12)
unique_time = obs["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected
def test_filter_time_range_day_unrecognised_error(test_scm_run_datetimes):
fail_filter = "Thud-Sat"
error_msg = re.escape(
"Could not convert day '{}' to integer".format(
[m for m in fail_filter.split("-")]
)
)
with pytest.raises(ValueError, match=error_msg):
test_scm_run_datetimes.filter(day=fail_filter)
@pytest.mark.parametrize("hour_range", [range(10, 14)])
def test_filter_time_range_hour(test_scm_run_datetimes, hour_range):
obs = test_scm_run_datetimes.filter(hour=hour_range)
expected_rows = (
test_scm_run_datetimes["time"].apply(lambda x: x.hour).isin(hour_range)
)
expected = test_scm_run_datetimes["time"][expected_rows].unique()
unique_time = obs["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected[0]
def test_filter_time_no_match(test_scm_datetime_run):
obs = test_scm_datetime_run.filter(time=dt.datetime(2004, 6, 18))
assert len(obs.time_points) == 0
assert obs.shape[1] == 0
assert obs.values.shape[1] == 0
def test_filter_time_not_datetime_error(test_scm_run_datetimes):
error_msg = re.escape("`time` can only be filtered with datetimes")
with pytest.raises(TypeError, match=error_msg):
test_scm_run_datetimes.filter(time=2005)
def test_filter_time_not_datetime_range_error(test_scm_run_datetimes):
error_msg = re.escape("`time` can only be filtered with datetimes")
with pytest.raises(TypeError, match=error_msg):
test_scm_run_datetimes.filter(time=range(2000, 2008))
def test_filter_as_kwarg(scm_run):
obs = list(scm_run.filter(variable="Primary Energy|Coal")["scenario"].unique())
assert obs == ["a_scenario"]
def test_filter_keep_false_time(scm_run):
df = scm_run.filter(year=2005, keep=False)
assert 2005 not in df.time_points.years()
assert 2010 in df.time_points.years()
obs = df.filter(scenario="a_scenario").timeseries().values.ravel()
npt.assert_array_equal(obs, [6, 6, 3, 3])
def test_filter_keep_false_metadata(scm_run):
df = scm_run.filter(variable="Primary Energy|Coal", keep=False)
assert "Primary Energy|Coal" not in df["variable"].tolist()
assert "Primary Energy" in df["variable"].tolist()
obs = df.filter(scenario="a_scenario").timeseries().values.ravel()
npt.assert_array_equal(obs, [1, 6, 6])
def test_filter_keep_false_time_and_metadata(scm_run):
error_msg = (
"If keep==False, filtering cannot be performed on the temporal axis "
"and with metadata at the same time"
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
scm_run.filter(variable="Primary Energy|Coal", year=2005, keep=False)
def test_filter_keep_false_successive(scm_run):
df = scm_run.filter(variable="Primary Energy|Coal", keep=False).filter(
year=2005, keep=False
)
obs = df.filter(scenario="a_scenario").timeseries().values.ravel()
npt.assert_array_equal(obs, [6, 6])
def test_filter_by_regexp(scm_run):
obs = scm_run.filter(scenario="a_scenari.$", regexp=True)
assert obs["scenario"].unique() == "a_scenario"
@pytest.mark.parametrize(
"regexp,exp_units", ((True, []), (False, ["W/m^2"]),),
)
def test_filter_by_regexp_caret(scm_run, regexp, exp_units):
tunits = ["W/m2"] * scm_run.shape[1]
tunits[-1] = "W/m^2"
scm_run["unit"] = tunits
obs = scm_run.filter(unit="W/m^2", regexp=regexp)
if not exp_units:
assert obs.empty
else:
assert obs.get_unique_meta("unit") == exp_units
def test_filter_asterisk_edgecase(scm_run):
scm_run["extra"] = ["*", "*", "other"]
obs = scm_run.filter(scenario="*")
assert len(obs) == len(scm_run)
obs = scm_run.filter(scenario="*", level=0)
assert len(obs) == 2
obs = scm_run.filter(scenario="a_scenario", level=0)
assert len(obs) == 1
    # Edge case: "*" is treated as a wildcard matching everything, rather than
    # as the literal "*" value present in the "extra" column
obs = scm_run.filter(extra="*", regexp=False)
assert len(obs) == len(scm_run)
assert (obs["extra"] == ["*", "*", "other"]).all()
# Not valid regex
pytest.raises(re.error, scm_run.filter, extra="*", regexp=True)
def test_filter_timeseries_different_length():
    # This is different from how `ScmDataFrame` deals with nans:
    # NaN and empty timeseries remain in the Run
df = ScmRun(
pd.DataFrame(
np.array([[1.0, 2.0, 3.0], [4.0, 5.0, np.nan]]).T, index=[2000, 2001, 2002]
),
columns={
"model": ["a_iam"],
"climate_model": ["a_model"],
"scenario": ["a_scenario", "a_scenario2"],
"region": ["World"],
"variable": ["Primary Energy"],
"unit": ["EJ/yr"],
},
)
npt.assert_array_equal(
df.filter(scenario="a_scenario2").timeseries().squeeze(), [4.0, 5.0, np.nan]
)
npt.assert_array_equal(df.filter(year=2002).timeseries().squeeze(), [3.0, np.nan])
exp = pd.Series(["a_scenario", "a_scenario2"], name="scenario")
obs = df.filter(year=2002)["scenario"]
pd.testing.assert_series_equal(exp, obs)
assert not df.filter(scenario="a_scenario2", year=2002).timeseries().empty
def test_filter_timeseries_nan_meta():
df = ScmRun(
pd.DataFrame(
np.array([[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]]).T, index=[2000, 2001]
),
columns={
"model": ["a_iam"],
"climate_model": ["a_model"],
"scenario": ["a_scenario", "a_scenario2", np.nan],
"region": ["World"],
"variable": ["Primary Energy"],
"unit": ["EJ/yr"],
},
)
def with_nan_assertion(a, b):
assert len(a) == len(b)
assert all(
[(v == b[i]) or (np.isnan(v) and np.isnan(b[i])) for i, v in enumerate(a)]
)
res = df.filter(scenario="*")["scenario"].unique()
exp = ["a_scenario", "a_scenario2", np.nan]
with_nan_assertion(res, exp)
res = df.filter(scenario="")["scenario"].unique()
exp = [np.nan]
with_nan_assertion(res, exp)
res = df.filter(scenario=np.nan)["scenario"].unique()
exp = [np.nan]
with_nan_assertion(res, exp)
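# Taken together, the three filters above show how NaN metadata is matched:
# the "*" wildcard keeps NaN entries, while both an empty-string filter
# (scenario="") and an explicit scenario=np.nan select only the NaN rows.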
def test_filter_index(scm_run):
pd.testing.assert_index_equal(scm_run.meta.index, pd.Int64Index([0, 1, 2]))
run = scm_run.filter(variable="Primary Energy")
exp_index = pd.Int64Index([0, 2])
pd.testing.assert_index_equal(run["variable"].index, exp_index)
pd.testing.assert_index_equal(run.meta.index, exp_index)
pd.testing.assert_index_equal(run._df.columns, exp_index)
run = scm_run.filter(variable="Primary Energy", keep=False)
exp_index = pd.Int64Index([1])
pd.testing.assert_index_equal(run["variable"].index, exp_index)
pd.testing.assert_index_equal(run.meta.index, exp_index)
pd.testing.assert_index_equal(run._df.columns, exp_index)
def test_append_index(scm_run):
def _check(res, reversed):
exp_index = pd.Int64Index([0, 1, 2])
|
pd.testing.assert_index_equal(res.meta.index, exp_index)
|
pandas.testing.assert_index_equal
|
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import Index, MultiIndex, Series, date_range, isna
import pandas._testing as tm
@pytest.fixture(
params=[
"linear",
"index",
"values",
"nearest",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def nontemporal_method(request):
"""Fixture that returns an (method name, required kwargs) pair.
This fixture does not include method 'time' as a parameterization; that
method requires a Series with a DatetimeIndex, and is generally tested
separately from these non-temporal methods.
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
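# Illustrative sketch of how the (method, kwargs) pair returned by the fixture
# above might be consumed in a test (hypothetical test name and data):
#
#     def test_interpolate_runs(nontemporal_method):
#         method, kwargs = nontemporal_method
#         ser = Series([0.0, np.nan, 2.0])
#         ser.interpolate(method=method, **kwargs)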
@pytest.fixture(
params=[
"linear",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def interp_methods_ind(request):
"""Fixture that returns a (method name, required kwargs) pair to
be tested for various Index types.
    This fixture does not include the methods 'time', 'index', 'nearest' and
    'values' as parameterizations.
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
class TestSeriesInterpolateData:
def test_interpolate(self, datetime_series, string_series):
ts = Series(np.arange(len(datetime_series), dtype=float), datetime_series.index)
ts_copy = ts.copy()
        ts_copy[5:10] = np.nan
linear_interp = ts_copy.interpolate(method="linear")
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series(
[d.toordinal() for d in datetime_series.index], index=datetime_series.index
).astype(float)
ord_ts_copy = ord_ts.copy()
        ord_ts_copy[5:10] = np.nan
time_interp = ord_ts_copy.interpolate(method="time")
tm.assert_series_equal(time_interp, ord_ts)
def test_interpolate_time_raises_for_non_timeseries(self):
# When method='time' is used on a non-TimeSeries that contains a null
# value, a ValueError should be raised.
        non_ts = Series([0, 1, 2, np.nan])
msg = "time-weighted interpolation only works on Series.* with a DatetimeIndex"
with pytest.raises(ValueError, match=msg):
non_ts.interpolate(method="time")
@td.skip_if_no_scipy
def test_interpolate_cubicspline(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
result = ser.reindex(new_index).interpolate(method="cubicspline")[1:3]
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interpolate_pchip(self):
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(
Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])
).astype(float)
interp_s = ser.reindex(new_index).interpolate(method="pchip")
# does not blow up, GH5977
interp_s[49:51]
@td.skip_if_no_scipy
def test_interpolate_akima(self):
ser = Series([10, 11, 12, 13])
# interpolate at new_index where `der` is zero
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima")
tm.assert_series_equal(interp_s[1:3], expected)
# interpolate at new_index where `der` is a non-zero int
expected = Series(
[11.0, 1.0, 1.0, 1.0, 12.0, 1.0, 1.0, 1.0, 13.0],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima", der=1)
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_piecewise_polynomial(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="piecewise_polynomial")
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_from_derivatives(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="from_derivatives")
tm.assert_series_equal(interp_s[1:3], expected)
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
),
],
)
def test_interpolate_corners(self, kwargs):
s = Series([np.nan, np.nan])
tm.assert_series_equal(s.interpolate(**kwargs), s)
s = Series([], dtype=object).interpolate()
tm.assert_series_equal(s.interpolate(**kwargs), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method="index")
expected = s.copy()
bad = isna(expected.values)
good = ~bad
expected = Series(
np.interp(vals[bad], vals[good], s.values[good]), index=s.index[bad]
)
tm.assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method="values")
tm.assert_series_equal(other_result, result)
tm.assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
msg = (
"time-weighted interpolation only works on Series or DataFrames "
"with a DatetimeIndex"
)
with pytest.raises(ValueError, match=msg):
s.interpolate(method="time")
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
),
],
)
def test_nan_interpolate(self, kwargs):
s = Series([0, 1, np.nan, 3])
result = s.interpolate(**kwargs)
expected = Series([0.0, 1.0, 2.0, 3.0])
tm.assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1.0, 2.0, 3.0, 4.0], index=[1, 3, 5, 9])
tm.assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list("abcd"))
result = s.interpolate()
expected = Series([0.0, 1.0, 2.0, 2.0], index=list("abcd"))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_quad(self):
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method="quadratic")
expected = Series([1.0, 4.0, 9.0, 16.0], index=[1, 2, 3, 4])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_scipy_basic(self):
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1.0, 3.0, 7.5, 12.0, 18.5, 25.0])
result = s.interpolate(method="slinear")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="slinear", downcast="infer")
tm.assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method="nearest")
tm.assert_series_equal(result, expected.astype("float"))
result = s.interpolate(method="nearest", downcast="infer")
tm.assert_series_equal(result, expected)
# zero
expected =
|
Series([1, 3, 3, 12, 12, 25])
|
pandas.Series
|
""" Tests for dataframe module
"""
# pylint: disable=missing-function-docstring,missing-class-docstring
# pylint: disable=invalid-name,no-self-use
import unittest
import pandas as pd
import numpy as np
from data_science_tools import dataframe
from data_science_tools.dataframe import (
coalesce,
merge_on_index,
)
class TestCoalesce(unittest.TestCase):
def test_coalesce(self):
series = [
pd.Series([np.nan, 1, np.nan, np.nan, 1]),
pd.Series([np.nan, 2, np.nan, 2, 2]),
pd.Series([np.nan, np.nan, 3, 3, 3]),
]
expected = pd.Series([np.nan, 1, 3, 2, 1])
actual = coalesce(series)
np.testing.assert_array_equal(actual.values, expected.values)
def test_coalesce_df(self):
df = pd.DataFrame(
{
0: pd.Series([np.nan, 1, np.nan, np.nan, 1]),
1: pd.Series([np.nan, 2, np.nan, 2, 2]),
2: pd.Series([np.nan, np.nan, 3, 3, 3]),
}
)
expected = pd.Series([np.nan, 1, 3, 2, 1])
actual = coalesce([df[c] for c in df])
np.testing.assert_array_equal(actual.values, expected.values)
def test_coalesce_df_multiple_columns(self):
df = pd.DataFrame(
{
0: pd.Series([np.nan, 1, np.nan, np.nan, 1]),
1: pd.Series([np.nan, 2, np.nan, 2, 2]),
2: pd.Series([np.nan, np.nan, 3, 3, 3]),
}
)
        # using the column names broke when multiple columns shared the same name.
df.columns = [0, 0, 0]
expected = pd.Series([np.nan, 1, 3, 2, 1])
actual = coalesce(df)
np.testing.assert_array_equal(actual.values, expected.values)
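# The expected values above imply coalesce behaves like SQL COALESCE applied
# element-wise: for each position it takes the first non-null value in the
# order the series/columns are given, and positions that are null everywhere
# stay NaN.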
class TestWindowFunction(unittest.TestCase):
"""Test window_functions"""
def _generate_example(self, size=10):
df_test = pd.DataFrame()
df_test["name"] = pd.np.random.choice(["tom", "bob"], size)
df_test["height"] = pd.np.random.randint(45, 60, size)
return df_test
def __init__(self, *args, **kws):
super().__init__(*args, **kws)
self.df_example_1 = pd.DataFrame(
[
("bob", 45),
("bob", 58),
("tom", 46),
("bob", 55),
("tom", 53),
("bob", 54),
("bob", 45),
("tom", 55),
("bob", 53),
("bob", 51),
],
columns=["name", "height"],
)
self.df_example_1.index += 10
self.df_example_2 = pd.DataFrame(
[
("bob", "smith", 45),
("bob", "jones", 50),
("tom", "smith", 53),
("bob", "jones", 50),
("bob", "jones", 58),
("tom", "jones", 47),
("bob", "smith", 54),
("bob", "jones", 48),
("tom", "smith", 59),
("tom", "smith", 49),
],
columns=["first_name", "last_name", "height"],
)
# MultiIndex
self.df_example_3 = self.df_example_2.copy()
self.df_example_3.index = pd.MultiIndex.from_tuples(
[
("developer", 30),
("developer", 31),
("developer", 32),
("developer", 33),
("programmer", 40),
("programmer", 41),
("programmer", 42),
("programmer", 43),
("programmer", 44),
("programmer", 45),
],
names=["occupation", "age"],
)
def _apply_example_1_height_mean(self, df):
return df["height"].mean()
def test_apply_full_range(self):
results = dataframe.window_function(
self.df_example_1,
self._apply_example_1_height_mean,
preceding=None,
following=None,
)
answer = pd.Series(51.5, index=self.df_example_1.index)
pd.testing.assert_series_equal(answer, results)
def test_apply_current(self):
results = dataframe.window_function(
self.df_example_1,
self._apply_example_1_height_mean,
)
answer = self.df_example_1["height"].astype(float)
answer.name = None
pd.testing.assert_series_equal(answer, results)
def test_apply_row_number(self):
results = dataframe.window_function(
self.df_example_1,
"row_number",
order_by="height",
)
answer = pd.Series(
[1, 10, 3, 8, 5, 7, 2, 9, 6, 4],
index=[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
)
        # TODO: fix result index
results.sort_index(inplace=True)
pd.testing.assert_series_equal(answer, results)
def test_apply_series_method(self):
example_data = pd.DataFrame()
example_data["column1"] = [2, 4, 6, 8, 10, 12]
results = dataframe.window_function(
example_data,
"mean",
"column1",
)
answer = example_data["column1"].astype(float)
answer.name = None
pd.testing.assert_series_equal(answer, results)
def test_apply_row_number_partition(self):
results = dataframe.window_function(
self.df_example_1,
"row_number",
partition_by="name",
order_by="height",
)
answer = pd.Series(
[1, 7, 1, 6, 2, 5, 2, 3, 4, 3],
index=[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
)
        # order_by scrambles the row order of the results, so sort by index before comparing
answer.sort_index(inplace=True)
results.sort_index(inplace=True)
pd.testing.assert_series_equal(answer, results)
def test_apply_row_number_partition_multiple(self):
results = dataframe.window_function(
self.df_example_2,
"row_number",
partition_by=["first_name", "last_name"],
order_by="height",
)
answer = pd.Series(
[1, 2, 3, 4, 1, 2, 1, 1, 2, 3], index=[7, 1, 3, 4, 0, 6, 5, 9, 2, 8]
)
        # order_by scrambles the row order of the results, so sort by index before comparing
answer.sort_index(inplace=True)
results.sort_index(inplace=True)
pd.testing.assert_series_equal(answer, results)
def test_apply_row_number_partition_multindex(self):
results = dataframe.window_function(
self.df_example_3,
"row_number",
partition_by=["first_name", "last_name"],
order_by="height",
)
answer = pd.Series(
[1, 2, 3, 4, 1, 2, 1, 1, 2, 3],
)
answer.index = pd.MultiIndex.from_tuples(
[
("programmer", 43),
("developer", 31),
("developer", 33),
("programmer", 40),
("developer", 30),
("programmer", 42),
("programmer", 41),
("programmer", 45),
("developer", 32),
("programmer", 44),
],
names=["occupation", "age"],
)
answer.sort_index(inplace=True)
results.sort_index(inplace=True)
pd.testing.assert_series_equal(answer, results)
def test_preceding_inclusive_following_exclusive(self):
example_data = pd.DataFrame()
example_data["column1"] = [2, 4, 6, 8, 10, 12]
def apply(df):
return df["column1"].mean()
results = dataframe.window_function(
example_data,
apply,
preceding=2,
following=2,
)
answer = pd.Series(
[
3.0, # 0:2 -> [2, 4]
4.0, # 0:3 -> [2, 4, 6]
5.0, # 0:4 -> [2, 4, 6, 8]
7.0, # 1:5 -> [4, 6, 8, 10]
9.0, # 2:6 -> [6, 8, 10, 12]
10.0, # 3:7 -> [8, 10, 12]
]
)
pd.testing.assert_series_equal(answer, results)
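    # The expected means above pin down the window convention these tests assume:
    # for row i the window covers rows [i - preceding, i + following), i.e. the
    # preceding bound is inclusive and the following bound is exclusive.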
def test_preceding_and_following_offsets(self):
example_data =
|
pd.DataFrame()
|
pandas.DataFrame
|
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
import pytest
import statsmodels.datasets
from statsmodels.tsa.ar_model import AutoReg
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.base.prediction import PredictionResults
from statsmodels.tsa.deterministic import Fourier
from statsmodels.tsa.exponential_smoothing.ets import ETSModel
from statsmodels.tsa.forecasting.stl import STLForecast
from statsmodels.tsa.seasonal import STL, DecomposeResult
from statsmodels.tsa.statespace.exponential_smoothing import (
ExponentialSmoothing,
)
@pytest.fixture(scope="module")
def data(request):
rs = np.random.RandomState(987654321)
err = rs.standard_normal(500)
index =
|
pd.date_range("1980-1-1", freq="M", periods=500)
|
pandas.date_range
|
import unittest
import os, shutil
import pandas as pd
from adapter.i_o import IO
import logging
logging.basicConfig(level=logging.DEBUG)
class IOTests(unittest.TestCase):
def test_load_from_excel(self):
"""Tests the typical LCC analysis case
where all input tables are saved as
named tables in an excel input sheet.
"""
path = os.path.join(os.getcwd(), r"adapter/tests/test.xlsx")
i_o = IO(path)
res = i_o.load()
self.assertTrue("db_path" in res.keys())
# tear down
shutil.rmtree(res["outpath"])
res_no_db = i_o.load(create_db=False)
self.assertFalse("db_path" in res_no_db.keys())
# uncomment only if you have a haggis
# server connection set up with a
# Secret.py file based on the Secret_example.py
# def test_load_from_excel_w_sqalchemy(self):
# """Tests the typical LCC analysis case
# where all input tables are saved as
# named tables in an excel input sheet.
# """
# path = os.path.join(os.getcwd(),
# r"adapter/tests/test_w_inputs_from_files_table_sqlalchemy.xlsx")
# i_o = IO(path)
# res = i_o.load()
# self.assertTrue(
# set(['adapter_example_table1',
# 'adapter_example_table2',
# 'adapter_example_table3']).issubset(
# set(res['tables_as_dict_of_dfs'].keys())))
def test_throws_error_at_duplication(self):
"""Errors when same-named table is identified."""
path = os.path.join(
os.getcwd(),
r"adapter/tests/inputs_from_files_vDuplicationError.csv",
)
i_o = IO(path)
self.assertRaises(ValueError, i_o.load)
def test_load_from_excel_w_input_from_files(self):
"""Tests the ability to load in input tables
from various additional files based on an info provided
in the usual excel sheet.
"""
path = os.path.join(
os.getcwd(), r"adapter/tests/test_w_inputs_from_files_table.xlsx"
)
i_o = IO(path)
res = i_o.load()
self.assertEqual(len(res["tables_as_dict_of_dfs"].keys()), 10)
# tear down
shutil.rmtree(res["outpath"])
def test_load_from_csv_inputs_from_files(self):
"""Tests loading from a single csv file that
points to further inputs of any supported type.
"""
path = os.path.join(
os.getcwd(), r"adapter/tests/inputs_from_files_vTest.csv"
)
i_o = IO(path)
res = i_o.load(to_numeric=["xlsx_table2"])
self.assertEqual(len(res["tables_as_dict_of_dfs"].keys()), 11)
self.assertEqual(len(res.keys()), 5)
def test_load_from_excel_no_run_parameters(self):
"""Tests loading from an excel table without
defined version and output path parameters.
"""
path = os.path.join(
os.getcwd(), r"adapter/tests/test_no_run_parameters.xlsx"
)
i_o = IO(path)
res = i_o.load()
self.assertEqual(len(res["tables_as_dict_of_dfs"].keys()), 2)
# tear down
shutil.rmtree(res["outpath"])
def test_load_from_db(self):
"""Tests loading from a db."""
path = os.path.join(os.getcwd(), r"adapter/tests/test.db")
i_o = IO(path)
res = i_o.load()
self.assertEqual(len(res["tables_as_dict_of_dfs"].keys()), 3)
def test_load_from_none(self):
"""Tests loading from a path specified as None."""
i_o = IO(None)
res = i_o.load()
self.assertEqual(isinstance(res, dict), True)
def test_first_col_to_index(self):
"""Tests if the first column is set as a index."""
lst = [["A", 1, 1], ["A", 2, 1], ["B", 3, 1], ["B", 4, 1]]
df1 = pd.DataFrame(lst, columns=["A", "B", "C"])
df2 = pd.DataFrame(lst, columns=["X", "Y", "Z"])
dict_of_dfs = {"df1": df1, "df2": df2}
path = os.path.join(os.getcwd(), r"adapter/tests/test.db")
i_o = IO(path)
case1 = i_o.first_col_to_index(dict_of_dfs, table_names=True)
case2 = i_o.first_col_to_index(dict_of_dfs, table_names=["df1"])
case1_check = {
"df1": df1.set_index("A", drop=True),
"df2": df2.set_index("X", drop=True),
}
case2_check = {"df1": df1.set_index("A", drop=True), "df2": df2}
        # Case 1: all tables are modified to use the first column as the index
for x in case1.keys():
assert case1[x].equals(case1_check[x])
        # Case 2: only some tables are modified
for x in case2.keys():
assert case2[x].equals(case2_check[x])
def test_process_column_labels(self):
"""Tests if undesired whitespace from column labels is removed."""
path = os.path.join(os.getcwd(), r"adapter/tests/test_labels.xlsx")
i_o = IO(path)
labels = ["aa bb ", " cc dd"]
expected_labels = ["aa bb", "cc dd"]
result_labels = i_o.process_column_labels(labels)
self.assertEqual(result_labels, expected_labels)
def test_write_to_db(self):
"""Tests main write method for type db"""
path = os.path.join(
os.getcwd(), r"adapter/tests/inputs_from_files_vTest.csv"
)
i_o = IO(path)
data_conn = i_o.load()
# write to db based on load method output only
i_o.write(type="db", data_connection=data_conn)
self.assertTrue(os.path.isfile(data_conn["db_path"]))
self.assertTrue(os.path.isdir(data_conn["outpath"]))
# tear down files
shutil.rmtree(data_conn["outpath"])
# write to db based on load method output but
# write new dataframes
new = {"df1":
|
pd.DataFrame([1, 2])
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from itertools import product
from sklearn.model_selection import TimeSeriesSplit
import vectorbt as vbt
from vectorbt.generic import nb
seed = 42
day_dt = np.timedelta64(86400000000000)
df = pd.DataFrame({
'a': [1, 2, 3, 4, np.nan],
'b': [np.nan, 4, 3, 2, 1],
'c': [1, 2, np.nan, 2, 1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1),
datetime(2018, 1, 2),
datetime(2018, 1, 3),
datetime(2018, 1, 4),
datetime(2018, 1, 5)
]))
group_by = np.array(['g1', 'g1', 'g2'])
@njit
def i_or_col_pow_nb(i_or_col, x, pow):
return np.power(x, pow)
@njit
def pow_nb(x, pow):
return np.power(x, pow)
@njit
def nanmean_nb(x):
return np.nanmean(x)
@njit
def i_col_nanmean_nb(i, col, x):
return np.nanmean(x)
@njit
def i_nanmean_nb(i, x):
return np.nanmean(x)
@njit
def col_nanmean_nb(col, x):
return np.nanmean(x)
# ############# accessors.py ############# #
class TestAccessors:
def test_shuffle(self):
pd.testing.assert_series_equal(
df['a'].vbt.shuffle(seed=seed),
pd.Series(
np.array([2.0, np.nan, 3.0, 1.0, 4.0]),
index=df['a'].index,
name=df['a'].name
)
)
np.testing.assert_array_equal(
df['a'].vbt.shuffle(seed=seed).values,
nb.shuffle_1d_nb(df['a'].values, seed=seed)
)
pd.testing.assert_frame_equal(
df.vbt.shuffle(seed=seed),
pd.DataFrame(
np.array([
[2., 2., 2.],
[np.nan, 4., 1.],
[3., 3., 2.],
[1., np.nan, 1.],
[4., 1., np.nan]
]),
index=df.index,
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_value",
[-1, 0., np.nan],
)
def test_fillna(self, test_value):
pd.testing.assert_series_equal(df['a'].vbt.fillna(test_value), df['a'].fillna(test_value))
pd.testing.assert_frame_equal(df.vbt.fillna(test_value), df.fillna(test_value))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_bshift(self, test_n):
pd.testing.assert_series_equal(df['a'].vbt.bshift(test_n), df['a'].shift(-test_n))
np.testing.assert_array_equal(
df['a'].vbt.bshift(test_n).values,
nb.bshift_nb(df['a'].values, test_n)
)
pd.testing.assert_frame_equal(df.vbt.bshift(test_n), df.shift(-test_n))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(df['a'].vbt.fshift(test_n), df['a'].shift(test_n))
np.testing.assert_array_equal(
df['a'].vbt.fshift(test_n).values,
nb.fshift_1d_nb(df['a'].values, test_n)
)
pd.testing.assert_frame_equal(df.vbt.fshift(test_n), df.shift(test_n))
def test_diff(self):
pd.testing.assert_series_equal(df['a'].vbt.diff(), df['a'].diff())
np.testing.assert_array_equal(df['a'].vbt.diff().values, nb.diff_1d_nb(df['a'].values))
pd.testing.assert_frame_equal(df.vbt.diff(), df.diff())
def test_pct_change(self):
pd.testing.assert_series_equal(df['a'].vbt.pct_change(), df['a'].pct_change(fill_method=None))
np.testing.assert_array_equal(df['a'].vbt.pct_change().values, nb.pct_change_1d_nb(df['a'].values))
pd.testing.assert_frame_equal(df.vbt.pct_change(), df.pct_change(fill_method=None))
def test_ffill(self):
pd.testing.assert_series_equal(df['a'].vbt.ffill(), df['a'].ffill())
pd.testing.assert_frame_equal(df.vbt.ffill(), df.ffill())
def test_product(self):
assert df['a'].vbt.product() == df['a'].product()
np.testing.assert_array_equal(df.vbt.product(), df.product())
def test_cumsum(self):
pd.testing.assert_series_equal(df['a'].vbt.cumsum(), df['a'].cumsum())
pd.testing.assert_frame_equal(df.vbt.cumsum(), df.cumsum())
def test_cumprod(self):
pd.testing.assert_series_equal(df['a'].vbt.cumprod(), df['a'].cumprod())
pd.testing.assert_frame_equal(df.vbt.cumprod(), df.cumprod())
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_min(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_min(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_min(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_min(test_window),
df.rolling(test_window).min()
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_max(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_max(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_max(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_max(test_window),
df.rolling(test_window).max()
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_mean(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_mean(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_mean(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_mean(test_window),
df.rolling(test_window).mean()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_ddof",
list(product([1, 2, 3, 4, 5], [1, None], [0, 1]))
)
def test_rolling_std(self, test_window, test_minp, test_ddof):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_std(test_window, minp=test_minp, ddof=test_ddof),
df['a'].rolling(test_window, min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_std(test_window, minp=test_minp, ddof=test_ddof),
df.rolling(test_window, min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_std(test_window),
df.rolling(test_window).std()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_adjust",
list(product([1, 2, 3, 4, 5], [1, None], [False, True]))
)
def test_ewm_mean(self, test_window, test_minp, test_adjust):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.ewm_mean(test_window, minp=test_minp, adjust=test_adjust),
df['a'].ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).mean()
)
pd.testing.assert_frame_equal(
df.vbt.ewm_mean(test_window, minp=test_minp, adjust=test_adjust),
df.ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).mean()
)
pd.testing.assert_frame_equal(
df.vbt.ewm_mean(test_window),
df.ewm(span=test_window).mean()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_adjust,test_ddof",
list(product([1, 2, 3, 4, 5], [1, None], [False, True], [0, 1]))
)
def test_ewm_std(self, test_window, test_minp, test_adjust, test_ddof):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.ewm_std(test_window, minp=test_minp, adjust=test_adjust, ddof=test_ddof),
df['a'].ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.ewm_std(test_window, minp=test_minp, adjust=test_adjust, ddof=test_ddof),
df.ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.ewm_std(test_window),
df.ewm(span=test_window).std()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_min(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_min(minp=test_minp),
df['a'].expanding(min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_min(minp=test_minp),
df.expanding(min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_min(),
df.expanding().min()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_max(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_max(minp=test_minp),
df['a'].expanding(min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_max(minp=test_minp),
df.expanding(min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_max(),
df.expanding().max()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_mean(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_mean(minp=test_minp),
df['a'].expanding(min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_mean(minp=test_minp),
df.expanding(min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_mean(),
df.expanding().mean()
)
@pytest.mark.parametrize(
"test_minp,test_ddof",
list(product([1, 3], [0, 1]))
)
def test_expanding_std(self, test_minp, test_ddof):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_std(minp=test_minp, ddof=test_ddof),
df['a'].expanding(min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_std(minp=test_minp, ddof=test_ddof),
df.expanding(min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_std(),
df.expanding().std()
)
def test_apply_along_axis(self):
pd.testing.assert_frame_equal(
df.vbt.apply_along_axis(i_or_col_pow_nb, 2, axis=0),
df.apply(pow_nb, args=(2,), axis=0, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.apply_along_axis(i_or_col_pow_nb, 2, axis=1),
df.apply(pow_nb, args=(2,), axis=1, raw=True)
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_apply(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_apply(test_window, i_col_nanmean_nb, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(test_window, i_col_nanmean_nb, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(test_window, i_col_nanmean_nb),
df.rolling(test_window).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(3, i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[2.75, 2.75, 2.75],
[np.nan, np.nan, np.nan]
]),
index=df.index,
columns=df.columns
)
)
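    # In the on_matrix=True case above the reducer receives the whole window
    # across all columns at once, so every column gets the same value; e.g. 2.75
    # is the nan-mean of the nine entries (one NaN ignored) in rows
    # 2018-01-02..2018-01-04.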
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_apply(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_apply(i_col_nanmean_nb, minp=test_minp),
df['a'].expanding(min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_col_nanmean_nb, minp=test_minp),
df.expanding(min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_col_nanmean_nb),
df.expanding().apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan],
[2.0, 2.0, 2.0],
[2.2857142857142856, 2.2857142857142856, 2.2857142857142856],
[2.4, 2.4, 2.4],
[2.1666666666666665, 2.1666666666666665, 2.1666666666666665]
]),
index=df.index,
columns=df.columns
)
)
def test_groupby_apply(self):
pd.testing.assert_series_equal(
df['a'].vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_col_nanmean_nb),
df['a'].groupby(np.asarray([1, 1, 2, 2, 3])).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_col_nanmean_nb),
df.groupby(np.asarray([1, 1, 2, 2, 3])).agg({
'a': lambda x: nanmean_nb(x.values),
'b': lambda x: nanmean_nb(x.values),
'c': lambda x: nanmean_nb(x.values)
}), # any clean way to do column-wise grouping in pandas?
)
def test_groupby_apply_on_matrix(self):
pd.testing.assert_frame_equal(
df.vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[2., 2., 2.],
[2.8, 2.8, 2.8],
[1., 1., 1.]
]),
index=pd.Int64Index([1, 2, 3], dtype='int64'),
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_freq",
['1h', '3d', '1w'],
)
def test_resample_apply(self, test_freq):
pd.testing.assert_series_equal(
df['a'].vbt.resample_apply(test_freq, i_col_nanmean_nb),
df['a'].resample(test_freq).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.resample_apply(test_freq, i_col_nanmean_nb),
df.resample(test_freq).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.resample_apply('3d', i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[2.28571429, 2.28571429, 2.28571429],
[2., 2., 2.]
]),
index=pd.DatetimeIndex(['2018-01-01', '2018-01-04'], dtype='datetime64[ns]', freq='3D'),
columns=df.columns
)
)
def test_applymap(self):
@njit
def mult_nb(i, col, x):
return x * 2
pd.testing.assert_series_equal(
df['a'].vbt.applymap(mult_nb),
df['a'].map(lambda x: x * 2)
)
pd.testing.assert_frame_equal(
df.vbt.applymap(mult_nb),
df.applymap(lambda x: x * 2)
)
def test_filter(self):
@njit
def greater_nb(i, col, x):
return x > 2
pd.testing.assert_series_equal(
df['a'].vbt.filter(greater_nb),
df['a'].map(lambda x: x if x > 2 else np.nan)
)
pd.testing.assert_frame_equal(
df.vbt.filter(greater_nb),
df.applymap(lambda x: x if x > 2 else np.nan)
)
def test_apply_and_reduce(self):
@njit
def every_nth_nb(col, a, n):
return a[::n]
@njit
def sum_nb(col, a, b):
return np.nansum(a) + b
assert df['a'].vbt.apply_and_reduce(every_nth_nb, sum_nb, apply_args=(2,), reduce_args=(3,)) == \
df['a'].iloc[::2].sum() + 3
pd.testing.assert_series_equal(
df.vbt.apply_and_reduce(every_nth_nb, sum_nb, apply_args=(2,), reduce_args=(3,)),
df.iloc[::2].sum().rename('apply_and_reduce') + 3
)
pd.testing.assert_series_equal(
df.vbt.apply_and_reduce(
every_nth_nb, sum_nb, apply_args=(2,),
reduce_args=(3,), wrap_kwargs=dict(time_units=True)),
(df.iloc[::2].sum().rename('apply_and_reduce') + 3) * day_dt
)
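    # In apply_and_reduce above, apply_args/reduce_args are forwarded as extra
    # positional arguments after the per-column array (keep every 2nd element,
    # then add 3 to the nan-sum), and wrap_kwargs=dict(time_units=True)
    # multiplies the result by day_dt (one day), turning plain numbers into
    # timedeltas.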
def test_reduce(self):
@njit
def sum_nb(col, a):
return np.nansum(a)
assert df['a'].vbt.reduce(sum_nb) == df['a'].sum()
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb),
df.sum().rename('reduce')
)
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb, wrap_kwargs=dict(time_units=True)),
df.sum().rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb, group_by=group_by),
pd.Series([20.0, 6.0], index=['g1', 'g2']).rename('reduce')
)
@njit
def argmax_nb(col, a):
a = a.copy()
a[np.isnan(a)] = -np.inf
return np.argmax(a)
assert df['a'].vbt.reduce(argmax_nb, to_idx=True) == df['a'].idxmax()
pd.testing.assert_series_equal(
df.vbt.reduce(argmax_nb, to_idx=True),
df.idxmax().rename('reduce')
)
pd.testing.assert_series_equal(
df.vbt.reduce(argmax_nb, to_idx=True, flatten=True, group_by=group_by),
pd.Series(['2018-01-02', '2018-01-02'], dtype='datetime64[ns]', index=['g1', 'g2']).rename('reduce')
)
@njit
def min_and_max_nb(col, a):
out = np.empty(2)
out[0] = np.nanmin(a)
out[1] = np.nanmax(a)
return out
pd.testing.assert_series_equal(
df['a'].vbt.reduce(
min_and_max_nb, to_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([np.nanmin(df['a']), np.nanmax(df['a'])], index=['min', 'max'], name='a')
)
pd.testing.assert_frame_equal(
df.vbt.reduce(
min_and_max_nb, to_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
df.apply(lambda x: pd.Series(np.asarray([np.nanmin(x), np.nanmax(x)]), index=['min', 'max']), axis=0)
)
pd.testing.assert_frame_equal(
df.vbt.reduce(
min_and_max_nb, to_array=True, group_by=group_by,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame([[1.0, 1.0], [4.0, 2.0]], index=['min', 'max'], columns=['g1', 'g2'])
)
@njit
def argmin_and_argmax_nb(col, a):
# nanargmin and nanargmax
out = np.empty(2)
_a = a.copy()
_a[np.isnan(_a)] = np.inf
out[0] = np.argmin(_a)
_a = a.copy()
_a[np.isnan(_a)] = -np.inf
out[1] = np.argmax(_a)
return out
pd.testing.assert_series_equal(
df['a'].vbt.reduce(
argmin_and_argmax_nb, to_idx=True, to_array=True,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
pd.Series([df['a'].idxmin(), df['a'].idxmax()], index=['idxmin', 'idxmax'], name='a')
)
pd.testing.assert_frame_equal(
df.vbt.reduce(
argmin_and_argmax_nb, to_idx=True, to_array=True,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
df.apply(lambda x: pd.Series(np.asarray([x.idxmin(), x.idxmax()]), index=['idxmin', 'idxmax']), axis=0)
)
pd.testing.assert_frame_equal(
df.vbt.reduce(argmin_and_argmax_nb, to_idx=True, to_array=True,
flatten=True, order='C', group_by=group_by,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
pd.DataFrame([['2018-01-01', '2018-01-01'], ['2018-01-02', '2018-01-02']],
dtype='datetime64[ns]', index=['idxmin', 'idxmax'], columns=['g1', 'g2'])
)
pd.testing.assert_frame_equal(
df.vbt.reduce(argmin_and_argmax_nb, to_idx=True, to_array=True,
flatten=True, order='F', group_by=group_by,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
pd.DataFrame([['2018-01-01', '2018-01-01'], ['2018-01-04', '2018-01-02']],
dtype='datetime64[ns]', index=['idxmin', 'idxmax'], columns=['g1', 'g2'])
)
def test_squeeze_grouped(self):
pd.testing.assert_frame_equal(
df.vbt.squeeze_grouped(i_col_nanmean_nb, group_by=group_by),
pd.DataFrame([
[1.0, 1.0],
[3.0, 2.0],
[3.0, np.nan],
[3.0, 2.0],
[1.0, 1.0]
], index=df.index, columns=['g1', 'g2'])
)
def test_flatten_grouped(self):
pd.testing.assert_frame_equal(
df.vbt.flatten_grouped(group_by=group_by, order='C'),
pd.DataFrame([
[1.0, 1.0],
[np.nan, np.nan],
[2.0, 2.0],
[4.0, np.nan],
[3.0, np.nan],
[3.0, np.nan],
[4.0, 2.0],
[2.0, np.nan],
[np.nan, 1.0],
[1.0, np.nan]
], index=np.repeat(df.index, 2), columns=['g1', 'g2'])
)
pd.testing.assert_frame_equal(
df.vbt.flatten_grouped(group_by=group_by, order='F'),
pd.DataFrame([
[1.0, 1.0],
[2.0, 2.0],
[3.0, np.nan],
[4.0, 2.0],
[np.nan, 1.0],
[np.nan, np.nan],
[4.0, np.nan],
[3.0, np.nan],
[2.0, np.nan],
[1.0, np.nan]
], index=np.tile(df.index, 2), columns=['g1', 'g2'])
)
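    # The two expected frames above show how flattening orders the stacked
    # values: order='C' interleaves the grouped columns row by row (index
    # repeated), while order='F' concatenates one whole column after another
    # (index tiled).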
@pytest.mark.parametrize(
"test_name,test_func,test_func_nb",
[
('min', lambda x, **kwargs: x.min(**kwargs), nb.nanmin_nb),
('max', lambda x, **kwargs: x.max(**kwargs), nb.nanmax_nb),
('mean', lambda x, **kwargs: x.mean(**kwargs), nb.nanmean_nb),
('median', lambda x, **kwargs: x.median(**kwargs), nb.nanmedian_nb),
('std', lambda x, **kwargs: x.std(**kwargs, ddof=0), nb.nanstd_nb),
('count', lambda x, **kwargs: x.count(**kwargs), nb.nancnt_nb),
('sum', lambda x, **kwargs: x.sum(**kwargs), nb.nansum_nb)
],
)
def test_funcs(self, test_name, test_func, test_func_nb):
# numeric
assert test_func(df['a'].vbt) == test_func(df['a'])
pd.testing.assert_series_equal(
test_func(df.vbt),
test_func(df).rename(test_name)
)
pd.testing.assert_series_equal(
test_func(df.vbt, group_by=group_by),
pd.Series([
test_func(df[['a', 'b']].stack()),
test_func(df['c'])
], index=['g1', 'g2']).rename(test_name)
)
np.testing.assert_array_equal(test_func(df).values, test_func_nb(df.values))
pd.testing.assert_series_equal(
test_func(df.vbt, wrap_kwargs=dict(time_units=True)),
test_func(df).rename(test_name) * day_dt
)
# boolean
bool_ts = df == df
assert test_func(bool_ts['a'].vbt) == test_func(bool_ts['a'])
pd.testing.assert_series_equal(
test_func(bool_ts.vbt),
test_func(bool_ts).rename(test_name)
)
pd.testing.assert_series_equal(
test_func(bool_ts.vbt, wrap_kwargs=dict(time_units=True)),
test_func(bool_ts).rename(test_name) * day_dt
)
@pytest.mark.parametrize(
"test_name,test_func",
[
('idxmin', lambda x, **kwargs: x.idxmin(**kwargs)),
('idxmax', lambda x, **kwargs: x.idxmax(**kwargs))
],
)
def test_arg_funcs(self, test_name, test_func):
assert test_func(df['a'].vbt) == test_func(df['a'])
pd.testing.assert_series_equal(
test_func(df.vbt),
test_func(df).rename(test_name)
)
pd.testing.assert_series_equal(
test_func(df.vbt, group_by=group_by),
pd.Series([
test_func(df[['a', 'b']].stack())[0],
test_func(df['c'])
], index=['g1', 'g2'], dtype='datetime64[ns]').rename(test_name)
)
def test_describe(self):
pd.testing.assert_series_equal(
df['a'].vbt.describe(),
df['a'].describe()
)
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=None),
df.describe(percentiles=None)
)
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=[]),
df.describe(percentiles=[])
)
test_against = df.describe(percentiles=np.arange(0, 1, 0.1))
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=np.arange(0, 1, 0.1)),
test_against
)
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=np.arange(0, 1, 0.1), group_by=group_by),
pd.DataFrame({
'g1': df[['a', 'b']].stack().describe(percentiles=np.arange(0, 1, 0.1)).values,
'g2': df['c'].describe(percentiles=np.arange(0, 1, 0.1)).values
}, index=test_against.index)
)
def test_drawdown(self):
pd.testing.assert_series_equal(
df['a'].vbt.drawdown(),
df['a'] / df['a'].expanding().max() - 1
)
pd.testing.assert_frame_equal(
df.vbt.drawdown(),
df / df.expanding().max() - 1
)
def test_drawdowns(self):
assert type(df['a'].vbt.drawdowns) is vbt.Drawdowns
assert df['a'].vbt.drawdowns.wrapper.freq == df['a'].vbt.wrapper.freq
assert df['a'].vbt.drawdowns.wrapper.ndim == df['a'].ndim
assert df.vbt.drawdowns.wrapper.ndim == df.ndim
def test_to_mapped_array(self):
np.testing.assert_array_equal(
df.vbt.to_mapped_array().values,
np.array([1., 2., 3., 4., 4., 3., 2., 1., 1., 2., 2., 1.])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array().col_arr,
np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array().idx_arr,
np.array([0, 1, 2, 3, 1, 2, 3, 4, 0, 1, 3, 4])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array(dropna=False).values,
np.array([1., 2., 3., 4., np.nan, np.nan, 4., 3., 2., 1., 1., 2., np.nan, 2., 1.])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array(dropna=False).col_arr,
np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array(dropna=False).idx_arr,
np.array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
)
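    # Reading the expected arrays above: to_mapped_array flattens the frame
    # column by column, recording each value's column index (col_arr) and row
    # index (idx_arr); with dropna left at its default the NaN entries are
    # skipped, while dropna=False keeps them.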
def test_zscore(self):
pd.testing.assert_series_equal(
df['a'].vbt.zscore(),
(df['a'] - df['a'].mean()) / df['a'].std(ddof=0)
)
pd.testing.assert_frame_equal(
df.vbt.zscore(),
(df - df.mean()) / df.std(ddof=0)
)
def test_split(self):
splitter = TimeSeriesSplit(n_splits=2)
(train_df, train_indexes), (test_df, test_indexes) = df['a'].vbt.split(splitter)
pd.testing.assert_frame_equal(
train_df,
pd.DataFrame(
np.array([
[1.0, 1.0],
[2.0, 2.0],
[3.0, 3.0],
[np.nan, 4.0]
]),
index=pd.RangeIndex(start=0, stop=4, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
train_indexes[i],
target[i]
)
pd.testing.assert_frame_equal(
test_df,
pd.DataFrame(
np.array([
[4.0, np.nan]
]),
index=
|
pd.RangeIndex(start=0, stop=1, step=1)
|
pandas.RangeIndex
|
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
        assert mapped_array['a'].nth_index(0) == 'x'
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
        assert mapped_array['a'].nth_index(-1) == 'z'
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
record_arrays_close(
r[['a', 'a']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(0, 1, 0, 10., 21.), (1, 1, 1, 11., 20.), (2, 1, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
record_arrays_close(
r[['a', 'b']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = r.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped['g1'].wrapper.ndim == 2
assert r_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert r_grouped['g2'].wrapper.ndim == 2
assert r_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped[['g1']].wrapper.ndim == 2
assert r_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert r_grouped[['g1', 'g2']].wrapper.ndim == 2
assert r_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_filtering(self):
filtered_records = vbt.Records(wrapper, records_arr[[0, -1]])
record_arrays_close(
filtered_records.values,
np.array([(0, 0, 0, 10., 21.), (8, 2, 2, 10., 21.)], dtype=example_dt)
)
# a
record_arrays_close(
filtered_records['a'].values,
np.array([(0, 0, 0, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['a'].map_field('some_field1').id_arr,
np.array([0])
)
assert filtered_records['a'].map_field('some_field1').min() == 10.
assert filtered_records['a'].count() == 1.
# b
record_arrays_close(
filtered_records['b'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['b'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['b'].map_field('some_field1').min())
assert filtered_records['b'].count() == 0.
# c
record_arrays_close(
filtered_records['c'].values,
np.array([(8, 0, 2, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['c'].map_field('some_field1').id_arr,
np.array([8])
)
assert filtered_records['c'].map_field('some_field1').min() == 10.
assert filtered_records['c'].count() == 1.
# d
record_arrays_close(
filtered_records['d'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['d'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['d'].map_field('some_field1').min())
assert filtered_records['d'].count() == 0.
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count'
], dtype='object')
pd.testing.assert_series_equal(
records.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 2.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
records.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 3
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
records.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 6
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c')
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records.stats(column='g2', group_by=group_by)
)
stats_df = records.stats(agg_func=None)
assert stats_df.shape == (4, 4)
pd.testing.assert_index_equal(stats_df.index, records.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# ranges.py ############# #
ts = pd.DataFrame({
'a': [1, -1, 3, -1, 5, -1],
'b': [-1, -1, -1, 4, 5, 6],
'c': [1, 2, 3, -1, -1, -1],
'd': [-1, -1, -1, -1, -1, -1]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
ranges = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days'))
ranges_grouped = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
class TestRanges:
def test_mapped_fields(self):
for name in range_dt.names:
np.testing.assert_array_equal(
getattr(ranges, name).values,
ranges.values[name]
)
def test_from_ts(self):
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 2, 3, 1), (2, 0, 4, 5, 1), (3, 1, 3, 5, 0), (4, 2, 0, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper.freq == day_dt
pd.testing.assert_index_equal(
ranges_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = ranges.records_readable
np.testing.assert_array_equal(
records_readable['Range Id'].values,
np.array([
0, 1, 2, 3, 4
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-01T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed'
])
)
def test_to_mask(self):
pd.testing.assert_series_equal(
ranges['a'].to_mask(),
ts['a'] != -1
)
pd.testing.assert_frame_equal(
ranges.to_mask(),
ts != -1
)
pd.testing.assert_frame_equal(
ranges_grouped.to_mask(),
pd.DataFrame(
[
[True, True],
[False, True],
[True, True],
[True, False],
[True, False],
[True, False]
],
index=ts.index,
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_duration(self):
np.testing.assert_array_equal(
ranges['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_equal(
ranges.duration.values,
np.array([1, 1, 1, 3, 3])
)
def test_avg_duration(self):
assert ranges['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.avg_duration(),
pd.Series(
np.array([129600000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert ranges['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.max_duration(),
pd.Series(
np.array([259200000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert ranges['a'].coverage() == 0.5
pd.testing.assert_series_equal(
ranges.coverage(),
pd.Series(
np.array([0.5, 0.5, 0.5, np.nan]),
                index=ts.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(),
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage()
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True),
pd.Series(
np.array([1.0, 1.0, 1.0, np.nan]),
                index=ts.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
                index=ts.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True, normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
                index=ts.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
ranges_grouped.replace(records_arr=np.repeat(ranges_grouped.values, 2)).coverage()
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage', 'Overlap Coverage',
'Total Records', 'Duration: Min', 'Duration: Median', 'Duration: Max',
'Duration: Mean', 'Duration: Std'
], dtype='object')
pd.testing.assert_series_equal(
ranges.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
|
pd.Timestamp('2020-01-06 00:00:00')
|
pandas.Timestamp
|
import inspect
from typing import Dict, Callable, List, Any
from collections import defaultdict
from copy import deepcopy
import isodate
import pandas as pd
import numpy as np
from monthdelta import monthdelta
from sklearn.linear_model import LinearRegression
from sklearn.utils import resample
def get_timedelta_from_granularity(granularity: str):
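    """Parse an ISO 8601 duration string (e.g. 'P1D', 'PT1H', 'P1M') into a time step.
    Calendar-based durations (years/months) are converted to a ``monthdelta``;
    everything else is returned as a ``datetime.timedelta``.
    """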
datetime_interval = isodate.parse_duration(granularity)
if isinstance(datetime_interval, isodate.duration.Duration):
years, months = datetime_interval.years, datetime_interval.months
total_months = int(years * 12 + months)
datetime_interval = monthdelta(months=total_months)
return datetime_interval
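# Illustrative sketch (not part of the original module): how granularity strings map
# to step objects under the logic above.
#   get_timedelta_from_granularity('P1D')  -> datetime.timedelta(days=1)
#   get_timedelta_from_granularity('PT1H') -> datetime.timedelta(hours=1)
#   get_timedelta_from_granularity('P1M')  -> monthdelta(1), a calendar-aware month step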
class BaseEstimator:
"""
    Implements get/set parameter logic, estimator validation and other
    methods common to all estimators. This is an improved version of sklearn's
    get/set params logic that also collects parameters from all parent classes
    in addition to the estimator itself
"""
@classmethod
def _get_param_names(cls, deep=True) -> List[str]:
"""Get parameter names for the estimator"""
def get_param_names_for_class(cls):
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = inspect.signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [
p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD
and p.kind != p.VAR_POSITIONAL
]
            # Add extra_params - params that are not listed in the signature (e.g. is_fitted)
extra_params = getattr(cls, 'extra_params', [])
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters] + extra_params)
# get self params
parameters = get_param_names_for_class(cls)
# if deep get all parents params
if deep:
for parent_class in cls.__bases__:
parameters.extend(get_param_names_for_class(parent_class))
return parameters
def get_params(self, deep: bool = True) -> Dict[str, Any]:
"""
Get parameters for this estimator
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators
Returns
-------
params
Parameter names mapped to their values
"""
out = dict()
for key in self._get_param_names():
try:
value = getattr(self, key)
except AttributeError:
value = None
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""
Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects.
The latter have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object
Parameters
----------
**params
Estimator parameters
Returns
-------
self
Estimator instance
"""
if not params:
# Simple optimization to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
nested_params = defaultdict(dict) # grouped by prefix
for key, value in params.items():
key, delim, sub_key = key.partition('__')
if key not in valid_params:
raise ValueError('Invalid parameter %s for predictor %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self))
if delim:
nested_params[key][sub_key] = value
else:
setattr(self, key, value)
valid_params[key] = value
for key, sub_params in nested_params.items():
valid_params[key].set_params(**sub_params)
return self
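# Minimal usage sketch (not part of the original module) of the nested
# ``<component>__<parameter>`` convention described above; the toy classes are
# hypothetical and exist only to illustrate get_params/set_params.
class _ToyInner(BaseEstimator):
    def __init__(self, alpha=1.0):
        self.alpha = alpha
class _ToyOuter(BaseEstimator):
    def __init__(self, inner=None):
        self.inner = inner if inner is not None else _ToyInner()
# _ToyOuter().set_params(inner__alpha=0.5).get_params()['inner__alpha'] == 0.5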
class TimeSeriesPredictor(BaseEstimator):
def __init__(
self,
granularity: str,
num_lags: int,
model: Callable = LinearRegression,
mappers: Dict[str, Callable] = {},
**kwargs
):
self.granularity = granularity
self.num_lags = num_lags
self.model = model(**kwargs)
self.mappers = mappers
self.fitted = False
self.std = None
def transform_into_matrix(self, ts: pd.Series) -> pd.DataFrame:
"""
        Transforms a time series into a lag matrix so that supervised
        learning algorithms can be applied
Parameters
------------
ts
Time series to transform
Returns
--------
lags_matrix
Dataframe with transformed values
"""
ts_values = ts.values
data = {}
for i in range(self.num_lags + 1):
data[f'lag_{self.num_lags - i}'] = np.roll(ts_values, -i)
lags_matrix = pd.DataFrame(data)[:-self.num_lags]
lags_matrix.index = ts.index[self.num_lags:]
return lags_matrix
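    # Worked example (illustrative, not part of the original class): with num_lags=2
    # and ts values [1, 2, 3, 4, 5], transform_into_matrix builds
    #   lag_2  lag_1  lag_0
    #     1      2      3
    #     2      3      4
    #     3      4      5
    # where lag_0 in each row is the target value and lag_1/lag_2 are the preceding
    # observations; the row index is ts.index[num_lags:].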
def enrich(
self,
lags_matrix: pd.DataFrame
) -> pd.DataFrame:
"""
        Adds external features to the lag matrix
        Parameters
        ------------
        lags_matrix
            Pandas dataframe with transformed time-series values
        Notes
        -----
        Features are built from ``self.mappers``: a dictionary mapping feature names
        to functions. Each function takes a timestamp as its only positional
        parameter and returns the value of the additional feature for that timestamp
"""
mappers = self.mappers
for name, mapper in mappers.items():
feature = pd.Series(lags_matrix.index.map(mapper), lags_matrix.index, name=name)
lags_matrix[name] = feature
return lags_matrix
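    # Illustrative sketch (hypothetical feature names): mappers passed to __init__
    # could look like
    #   {'day_of_week': lambda ts: ts.dayofweek, 'month': lambda ts: ts.month}
    # so that enrich() appends one extra column per mapper, computed from each
    # row's timestamp in the lag matrix.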
def fit(self, ts: pd.Series, *args, **kwargs):
lag_matrix = self.transform_into_matrix(ts)
feature_matrix = self.enrich(lag_matrix)
X, y = feature_matrix.drop('lag_0', axis=1), feature_matrix['lag_0']
self.model.fit(X, y, *args, **kwargs)
self.fitted = True
def predict_next(self, ts_lags, n_steps=1):
        if not self.fitted:
            raise ValueError('Model is not fitted yet')
predict = {}
ts = deepcopy(ts_lags)
for _ in range(n_steps):
next_row = self.generate_next_row(ts)
next_timestamp = next_row.index[-1]
value = self.model.predict(next_row)[0]
predict[next_timestamp] = value
ts[next_timestamp] = value
return
|
pd.Series(predict)
|
pandas.Series
|
###############################################################################
#
# Software program written by <NAME> in year 2021.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# By using this software, the Disclaimer and Terms distributed with the
# software are deemed accepted, without limitation, by user.
#
# You should have received a copy of the Disclaimer and Terms document
# along with this program. If not, see... https://bit.ly/2Tlr9ii
#
###############################################################################
import pandas as pd
from pathlib import Path
import quantstats as qs
import xlsxwriter
import yfinance as yf
"""
Module for converting the standard indicator dictionaries exported in the strategy
object at the end of a Backtrader backtest.
"""
def add_key_to_df(df, test_number):
""" Inserts the key columns at the beginning of the dataframe"""
df.insert(0, "test_number", test_number)
return df
def tradelist(
scene, analyzer, test_number, workbook=None, sheet_format=None, agg_dict=None
):
"""
    Writes a list of trades similar to AmiBroker's trade list, including MFE and MAE.
    :param scene: Scenario/settings dictionary controlling database and Excel output.
    :param analyzer: Backtest analyzer holding the trade list.
    :param test_number: Identifier of the current backtest run, inserted as a key column.
    :param workbook: Excel workbook to be saved to disk.
    :param sheet_format: Dictionary holding formatting information such as col width, font etc.
    :param agg_dict: Collects the dictionary outputs from backtrader for use in plotting.
    :return workbook: Excel workbook to be saved to disk.
    :return agg_dict: Updated aggregation dictionary.
"""
trade_list = analyzer[0]
if scene["save_db"]:
df = pd.DataFrame(trade_list)
df.columns = [x.replace("%", "_pct") for x in df.columns]
df = add_key_to_df(df, test_number)
agg_dict["trade_list"] = df
if scene["save_excel"]:
worksheet = workbook.add_worksheet("trade_list")
columns = trade_list[0].keys()
columns = [x.capitalize() for x in columns]
worksheet.write_row(0, 0, columns)
worksheet.set_row(0, None, sheet_format["header_format"])
worksheet.set_column("D:D", sheet_format["x_wide"], None)
worksheet.set_column("E:E", sheet_format["narrow"], sheet_format["float_2d"])
worksheet.set_column("F:F", sheet_format["x_wide"], None)
worksheet.set_column("G:G", sheet_format["narrow"], sheet_format["float_2d"])
worksheet.set_column("H:H", sheet_format["narrow"], sheet_format["percent"])
worksheet.set_column("I:I", sheet_format["narrow"], sheet_format["int_0d"])
worksheet.set_column("J:J", sheet_format["narrow"], sheet_format["percent"])
worksheet.set_column("L:M", sheet_format["narrow"], sheet_format["int_0d"])
worksheet.set_column("O:O", sheet_format["narrow"], sheet_format["int_0d"])
worksheet.set_column("P:P", sheet_format["narrow"], sheet_format["percent"])
worksheet.set_column("Q:Q", sheet_format["narrow"], sheet_format["percent"])
for i, d in enumerate(trade_list):
d["datein"] = d["datein"].strftime("%Y-%m-%d %H:%M")
d["dateout"] = d["dateout"].strftime("%Y-%m-%d %H:%M")
worksheet.write_row(i + 1, 0, d.values())
return workbook, agg_dict
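# Illustrative call sketch (hypothetical objects; in practice `analyzer` comes from a
# Backtrader strategy's analyzers and `scene` from the run configuration):
# workbook, agg_dict = tradelist(
#     scene={"save_db": True, "save_excel": False},
#     analyzer=[trade_rows],  # list whose first element is the list of trade dicts
#     test_number=1,
#     workbook=None,
#     sheet_format=None,
#     agg_dict={},
# )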
def tradeclosed(
scene, analyzer, test_number, workbook=None, sheet_format=None, agg_dict=None
):
"""
Closed trades, pnl, commission, and duration.
:param workbook: Excel workbook to be saved to disk.
:param analyzer: Backtest analyzer.
:param sheet_format: Dictionary holding formatting information such as col width, font etc.
    :param agg_dict: Collects the dictionary outputs from backtrader for use in plotting.
:return workbook: Excel workbook to be saved to disk.
"""
trade_dict = analyzer.get_analysis()
columns_df = [
"Date Closed",
"Ticker",
"PnL",
"PnL Comm",
"Commission",
"Days Open",
]
if scene["save_db"]:
df =
|
pd.DataFrame(trade_dict)
|
pandas.DataFrame
|
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
# coding: utf-8
"""
Lifts electrical transmission network to a single 380 kV voltage layer,
removes dead-ends of the network,
and reduces multi-hop HVDC connections to a single link.
Relevant Settings
-----------------
.. code:: yaml
costs:
USD2013_to_EUR2013:
discountrate:
marginal_cost:
capital_cost:
electricity:
max_hours:
renewables: (keys)
{technology}:
potential:
lines:
length_factor:
links:
p_max_pu:
solving:
solver:
name:
.. seealso::
Documentation of the configuration file ``config.yaml`` at
:ref:`costs_cf`, :ref:`electricity_cf`, :ref:`renewable_cf`,
:ref:`lines_cf`, :ref:`links_cf`, :ref:`solving_cf`
Inputs
------
- ``data/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity.
- ``resources/regions_onshore.geojson``: confer :ref:`busregions`
- ``resources/regions_offshore.geojson``: confer :ref:`busregions`
- ``networks/elec.nc``: confer :ref:`electricity`
Outputs
-------
- ``resources/regions_onshore_elec_s{simpl}.geojson``:
.. image:: ../img/regions_onshore_elec_s.png
:scale: 33 %
- ``resources/regions_offshore_elec_s{simpl}.geojson``:
    .. image:: ../img/regions_offshore_elec_s.png
:scale: 33 %
- ``resources/busmap_elec_s{simpl}.csv``: Mapping of buses from ``networks/elec.nc`` to ``networks/elec_s{simpl}.nc``;
- ``networks/elec_s{simpl}.nc``:
.. image:: ../img/elec_s.png
:scale: 33 %
Description
-----------
The rule :mod:`simplify_network` does up to four things:
1. Create an equivalent transmission network in which all voltage levels are mapped to the 380 kV level by the function ``simplify_network(...)``.
2. DC only sub-networks that are connected at only two buses to the AC network are reduced to a single representative link in the function ``simplify_links(...)``. The components attached to buses in between are moved to the nearest endpoint. The grid connection costs of offshore wind generators are added to the capital costs of the generator.
3. Stub lines and links, i.e. dead-ends of the network, are sequentially removed from the network in the function ``remove_stubs(...)``. Components are moved along.
4. Optionally, if an integer is provided for the wildcard ``{simpl}`` (e.g. ``networks/elec_s500.nc``), the network is clustered to this number of clusters with the routines from the ``cluster_network`` rule with the function ``cluster_network.cluster(...)``. This step is usually skipped!
"""
import logging
from _helpers import configure_logging, update_p_nom_max
from cluster_network import clustering_for_n_clusters, cluster_regions
from add_electricity import load_costs
import pandas as pd
import numpy as np
import scipy as sp
from scipy.sparse.csgraph import connected_components, dijkstra
from functools import reduce
import pypsa
from pypsa.io import import_components_from_dataframe, import_series_from_dataframe
from pypsa.networkclustering import busmap_by_stubs, aggregategenerators, aggregateoneport, get_clustering_from_busmap, _make_consense
logger = logging.getLogger(__name__)
def simplify_network_to_380(n):
## All goes to v_nom == 380
logger.info("Mapping all network lines onto a single 380kV layer")
n.buses['v_nom'] = 380.
linetype_380, = n.lines.loc[n.lines.v_nom == 380., 'type'].unique()
lines_v_nom_b = n.lines.v_nom != 380.
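# Rebase lower-voltage lines onto 380 kV: scaling num_parallel by (v_nom/380)^2
# keeps the per-unit line parameters consistent, and s_nom is then recomputed as
# sqrt(3) * I_nom * V_nom * num_parallel (three-phase apparent power) below.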
n.lines.loc[lines_v_nom_b, 'num_parallel'] *= (n.lines.loc[lines_v_nom_b, 'v_nom'] / 380.)**2
n.lines.loc[lines_v_nom_b, 'v_nom'] = 380.
n.lines.loc[lines_v_nom_b, 'type'] = linetype_380
n.lines.loc[lines_v_nom_b, 's_nom'] = (
np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
n.lines.bus0.map(n.buses.v_nom) * n.lines.num_parallel
)
# Replace transformers by lines
trafo_map = pd.Series(n.transformers.bus1.values, index=n.transformers.bus0.values)
""" Feature importance of ensemble models such as Gradient Boosted Trees or Random Forests is used in determining KMCs for each KCC
"""
import os
import sys
current_path=os.path.dirname(__file__)
parentdir = os.path.dirname(current_path)
#Adding Path to various Modules
sys.path.append("../core")
sys.path.append("../visualization")
sys.path.append("../utilities")
sys.path.append("../datasets")
sys.path.append("../trained_models")
sys.path.append("../config")
from sklearn.ensemble import RandomForestRegressor
import pathlib
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import metrics
import xgboost as xgb
from sklearn.externals import joblib
#Importing Config files
import assembly_config as config
import model_config as cftrain
#Importing required modules from the package
from measurement_system import HexagonWlsScanner
from assembly_system import VRMSimulationModel
from data_import import GetTrainData
import voxel_config as vc
from cop_viz import CopViz
def kmc_model_build(tree_based_model,point_data,selected_kcc,kcc_name,split_ratio=0.2,save_model=0):
"""kmc_model_build function inputs model_type and data to generate KMC for each KCC
:param tree_based_model: Type of model to be used for feature importance
:type tree_based_model: str (required)
:param point_data: input data consisting of node deviations
:type point_data: numpy.array (samples*nodes) (required)
:param selected_kcc: output data consisting of selected process parameter/KCC
:type selected_kcc: numpy.array (samples*1) (required)
:param kcc_name: unique identifier for the KCC
:type kcc_name: str (required)
:param split_ratio: test data split
:type split_ratio: float
:param save_model: Save model flag, set to 1 to save model
:type save_model: int
:returns: filtered_nodeIDs_x, node ids for which x-deviation is significant given the kcc
:rtype: numpy.array [kmcs*1]
:returns: filtered_nodeIDs_y, node ids for which y-deviation is significant given the kcc
:rtype: numpy.array [kmcs*1]
:returns: filtered_nodeIDs_z, node ids for which z-deviation is significant given the kcc
:rtype: numpy.array [kmcs*1]
"""
train_X, test_X, train_y, test_y = train_test_split(point_data, selected_kcc, test_size=split_ratio)
train=train_X
target=train_y
train.index=range(0,train.shape[0])
target.index=range(0,train.shape[0])
#%%
print('KMC Generation for selected KCC:', kcc_name)
if(tree_based_model=='rf'):
model=RandomForestRegressor(n_estimators=500,max_depth=300,n_jobs=-1,verbose=True)
if(tree_based_model=='xgb'):
model=xgb.XGBRegressor(colsample_bytree=0.4,gamma=0.045,learning_rate=0.07,max_depth=500,min_child_weight=1.5,n_estimators=150,reg_alpha=0.65,reg_lambda=0.45,subsample=0.95,n_jobs=-1,verbose=True)
model.fit(train,target)
#%%
y_pred = model.predict(test_X)
mae=metrics.mean_absolute_error(test_y, y_pred)
print('The MAE for feature selection for: ',kcc_name)
print(mae)
if(save_model==1):
filename = kcc_name+'_XGB_model.sav'
joblib.dump(model, filename)
print('Trained Model Saved to disk....')
thresholds = model.feature_importances_
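# The ensemble's feature importances act as per-node significance scores; they are
# paired with node ids below and later thresholded to obtain the KMC node sets
# (filtered_nodeIDs_x/y/z) for this KCC.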
node_id=np.arange(start=1, stop=point_data.shape[1]+1, step=1)
threshold_data=np.zeros((point_data.shape[1],2))
threshold_data[:,0]=node_id
threshold_data[:,1]=thresholds
print(point_data.shape[1])
node_IDs = pd.DataFrame(data=threshold_data, columns=['node_id', 'Feature_Importance'])
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
import math
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day
from qteasy.utilfuncs import next_market_trade_day, unify, mask_to_signal, list_or_slice, labels_to_dict
from qteasy.utilfuncs import weekday_name, prev_market_trade_day, is_number_like, list_truncate, input_to_list
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator, TimingDMA, TimingMACD, TimingCDL, TimingTRIX
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic, stock_company
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.history import stack_dataframes, dataframe_to_hp, HistoryPanel
from qteasy.database import DataSource
from qteasy.strategy import Strategy, SimpleTiming, RollingTiming, SimpleSelecting, FactoralSelecting
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
from qteasy.blender import _exp_to_token, blender_parser, signal_blend
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000., 20000., 10000.])
self.op = np.array([0., 1., -0.33333333])
self.amounts_to_sell = np.array([0., 0., -3333.3333])
self.cash_to_spend = np.array([0., 20000., 0.])
self.prices = np.array([10., 20., 10.])
self.r = qt.Cost(0.0)
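# Fixtures: three assets priced 10/20/10 with holdings of 10k/20k/10k shares, a full
# buy signal on the second asset and a one-third sell on the third; self.r is a Cost
# object whose rates, fixed fees, minimum fees and slippage are adjusted per test.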
def test_rate_creation(self):
"""测试对象生成"""
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
self.assertEqual(self.r.buy_fix, 0)
self.assertEqual(self.r.sell_fix, 0)
def test_rate_operations(self):
"""测试交易费率对象"""
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r.calculate(self.amounts),
[0.003, 0.003, 0.003]),
True,
'fee calculation wrong')
def test_rate_fee(self):
"""测试买卖交易费率"""
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 0.
self.r.sell_min = 0.
self.r.slipage = 0.
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1.))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
"""测试最低交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""测试最低交易费用对其他交易费率参数的影响"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
"""测试固定交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} does not equal [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
"""测试交易滑点"""
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-9:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} does not equal [0, 0, -3333.3333]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} does not equal 33298.88855591')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} does not equal 34.44444409')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
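# As the assertions above show, numeric 2-tuples are inferred as 'discr' (or 'conti'
# when a float bound is present), while longer tuples or non-numeric entries are
# treated as 'enum' axes.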
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = zip(extracted_int_list4, [(0, 10), (1, 'c'), (0, 'b'), (1, 14)])
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
# All points have been extracted; build ten subspaces around ten of those points
# Check that each subspace is a Space and lies within s, extract points with interval 32, and verify the counts
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
# test assersion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31')
#!/usr/bin/python
import pandas as pd
import numpy as np
import array
import os.path
import json
from ..utils import instance_path
from ..scrape import hae_dataa
from glob import glob
from contextlib import suppress
instancepath = instance_path() / "yle_scrape"
instancepath.mkdir(exist_ok=True)
def attach_data(df: pd.DataFrame) -> pd.DataFrame:
"""
Attach the scraped data to the data frame.
"""
# load in parties and constituents data
# if data is missing ask if scraping is wanted to be performed
with open(os.path.join(instancepath, f"parties.json"), "r", encoding='utf-8') as json_file:
parties = json.load(json_file)
with open(os.path.join(instancepath, f"constituencies.json"), "r", encoding='utf-8') as json_file:
constituencies = json.load(json_file)
""" #tätä glob hommaa en saanu toimiin, käy for loopin sisäl vaan yhen kerran ja hakee vaan yhden tiedoston
# load the scraped data to its own data frame
df_scraped = pd.DataFrame(columns=['first_name', 'last_name', 'election_number', 'image', 'election_promise_1', 'party', 'constituency'])
i = 1
with suppress(KeyError,FileNotFoundError):
for filename in glob(f"{instancepath}/candidate*.json"):
print("jee")
with open(filename, "r", encoding='utf-8') as json_file:
candidate = json.load(json_file)
party_name = None
constituency_name = None
for part in parties:
if part['id'] == candidate["party_id"]:
party_name = part['name_fi']
for consti in constituencies:
if consti['id'] == candidate["constituency_id"]:
constituency_name = consti['name_fi']
df_scraped = df_scraped.append({'first_name': candidate['first_name'],
'last_name': candidate['last_name'],
'election_number': candidate['election_number'],
'image': candidate['image'],
'election_promise_1': candidate['info']['election_promise_1'],
'party': party_name,
'constituency': constituency_name},
ignore_index = True)
#except (FileNotFoundError, KeyError):
"""
# load the scraped data to its own data frame
df_scraped = pd.DataFrame(columns=['first_name', 'last_name', 'election_number', 'image', 'election_promise_1', 'party', 'constituency'])
#!/usr/bin/env python3
# collectdata.py
import pandas as pd
import robin_stocks.robinhood as rh
from conf.secrets import EMAIL, PASSWORD
from cryptobot.constant import RAW_DIR, REF_DIR, TRADABLE_CRYPTOS
from cryptobot.d01_data.load_save_data import load_json_data
def collect_historical_data(ticker, interval='hour', span='week', bounds='24_7'):
login = rh.login(EMAIL, PASSWORD)
crypto_df = load_json_data(REF_DIR, TRADABLE_CRYPTOS)
if ticker not in crypto_df.values:
return pd.DataFrame()
hist = rh.get_crypto_historicals(
ticker,
interval=interval,
span=span,
bounds=bounds
)
hist_df = pd.DataFrame()
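# Each element of hist is a dict describing one candle; a one-row frame is built per
# element and appended to hist_df via pd.concat below.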
for i in range(len(hist)):
df = pd.DataFrame(hist[i], index=[i])
hist_df = pd.concat([hist_df, df])
import pandas as pd
import numpy as np
from datetime import datetime
from multiprocessing import Pool
from functools import partial
from pathos import pools as pp
import pickle as pkl
from UserCentricMeasurements import *
from ContentCentricMeasurements import *
from CommunityCentricMeasurements import *
from TEMeasurements import *
from collections import defaultdict
import jpype
import json
import os
basedir = os.path.dirname(__file__)
class BaselineMeasurements(UserCentricMeasurements, ContentCentricMeasurements, TEMeasurements, CommunityCentricMeasurements):
def __init__(self,
dfLoc,
content_node_ids=[],
user_node_ids=[],
metaContentData=False,
metaUserData=False,
contentActorsFile=os.path.join(basedir, './baseline_challenge_data/filtUsers-baseline.pkl'),
contentFile=os.path.join(basedir, './baseline_challenge_data/filtRepos-baseline.pkl'),
topNodes=[],
topEdges=[],
previousActionsFile='',
community_dictionary='',
# community_dictionary=os.path.join(basedir, './baseline_challenge_data/baseline_challenge_community_dict.pkl'),
te_config=os.path.join(basedir, './baseline_challenge_data/te_params_baseline.json'),
platform='github',
use_java=True):
super(BaselineMeasurements, self).__init__()
self.platform = platform
try:
# check if input is a data frame
dfLoc.columns
df = dfLoc
except:
# if not it should be a csv file path
df = pd.read_csv(dfLoc)
self.contribution_events = ['PullRequestEvent',
'PushEvent',
'IssuesEvent',
'IssueCommentEvent',
'PullRequestReviewCommentEvent',
'CommitCommentEvent',
'CreateEvent',
'post',
'tweet']
self.popularity_events = ['WatchEvent',
'ForkEvent',
'comment',
'post',
'retweet',
'quote',
'reply']
print('preprocessing...')
self.main_df = self.preprocess(df)
print('splitting optional columns...')
# store action and merged columns in a separate data frame that is not used for most measurements
if platform == 'github' and len(self.main_df.columns) == 6 and 'action' in self.main_df.columns:
self.main_df_opt = self.main_df.copy()[['action', 'merged']]
self.main_df = self.main_df.drop(['action', 'merged'], axis=1)
else:
self.main_df_opt = None
# For content centric
print('getting selected content IDs...')
if content_node_ids != ['all']:
if self.platform == 'reddit':
self.selectedContent = self.main_df[self.main_df.root.isin(content_node_ids)]
elif self.platform == 'twitter':
self.selectedContent = self.main_df[self.main_df.root.isin(content_node_ids)]
else:
self.selectedContent = self.main_df[self.main_df.content.isin(content_node_ids)]
else:
self.selectedContent = self.main_df
# For userCentric
self.selectedUsers = self.main_df[self.main_df.user.isin(user_node_ids)]
print('processing repo metadata...')
# read in external metadata files
# repoMetaData format - full_name_h,created_at,owner.login_h,language
# userMetaData format - login_h,created_at,location,company
if metaContentData != False:
self.useContentMetaData = True
meta_content_data = pd.read_csv(metaContentData)
self.contentMetaData = self.preprocessContentMeta(meta_content_data)
else:
self.useContentMetaData = False
print('processing user metadata...')
if metaUserData != False:
self.useUserMetaData = True
self.userMetaData = self.preprocessUserMeta(pd.read_csv(metaUserData))
else:
self.useUserMetaData = False
# For Community
self.community_dict_file = community_dictionary
print('getting communities...')
if self.platform == 'github':
self.communityDF = self.getCommmunityDF(community_col='community')
elif self.platform == 'reddit':
self.communityDF = self.getCommmunityDF(community_col='subreddit')
else:
self.communityDF = self.getCommmunityDF(community_col='')
# read in previous events count external file (used only for one measurement)
try:
print('reading previous counts...')
self.previous_event_counts = pd.read_csv(previousActionsFile)
import os
from os.path import join as pjoin
import re
import multiprocessing as mp
from multiprocessing import Pool
from Bio.Seq import Seq
from Bio import SeqIO, SeqFeature
from Bio.SeqRecord import SeqRecord
import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
import warnings
import time
import sqlite3 as sql
from collections import defaultdict
import gc
from gtr_utils import change_ToWorkingDirectory, make_OutputDirectory, merge_ManyFiles, multiprocesssing_Submission, generate_AssemblyId, chunks
import subprocess
from subprocess import DEVNULL, STDOUT, check_call
def extract_GenbankMetadata(input_filename, gbff_raw_dir):
'''
'''
assembly_id = generate_AssemblyId(input_gbff_file=input_filename)
for x, record in enumerate(SeqIO.parse(pjoin(gbff_raw_dir,input_filename),'gb')):
if x == 0:
df_tax = pd.DataFrame({'assembly_id':[assembly_id], 'filename':[input_filename]})
try:
df_tax['organism'] = record.annotations['organism']
except:
df_tax['organism'] = 'NA'
break
return(df_tax)
def extract_GenbankFeatures(input_filename, gbff_raw_dir, assembly_faa_dir):
'''
Large function that extracts all relevant GenBank data
Extracts CDS features
Writes all CDS to ./assembly folder
'''
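# Each CDS becomes one row of the returned df_gbf: internal assembly/contig/locus ids,
# the RefSeq locus tag, CDS coordinates and strand, product/gene annotations, and a
# 'p'/'g' flag for pseudo genes; translations are also streamed to <assembly_id>.faa
# for the BLAST databases built later.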
assembly_id = generate_AssemblyId(input_gbff_file=input_filename)
store_rows = []
with open(pjoin(assembly_faa_dir, assembly_id+'.faa'),'w') as outfile:
n = 0
for x, record in enumerate(SeqIO.parse(pjoin(gbff_raw_dir,input_filename),'gb')):
## extract releavant CDS features ##
for f in record.features:
if f.type=='CDS':
try:
refseq_locus_tag = f.qualifiers['locus_tag'][0]
except:
refseq_locus_tag = 'NA'
try:
refseq_product = f.qualifiers['product'][0]
except:
refseq_product = 'NA'
try:
refseq_gene = f.qualifiers['gene'][0]
except:
refseq_gene = 'NA'
try:
reseq_translation = f.qualifiers['translation'][0]
except:
reseq_translation = 'NA'
##
if reseq_translation == 'NA':
pseudo_check = 'p'
else:
pseudo_check = 'g'
#internal assembly id, internal contig id, internal locus tag, refseq locus tag, cds start, cds end, cds strand, refseq product, refseq gene
locus_tag_string = str(n).zfill(5) # generates a buffer of up to 5 zeroes so all locus_tag have same length
locus_tag = assembly_id+'_'+locus_tag_string
# check that f.location gives the cds coordinates and not the whole contig coordinates
if len(f.location.parts) > 1:
start_cds = f.location.parts[1].start
end_cds = f.location.parts[1].end
else:
start_cds = f.location.start.position
end_cds = f.location.end.position
# store in list
store_rows.append(
[
assembly_id,
assembly_id+'_'+str(x), #contig_id
locus_tag,
refseq_locus_tag,
start_cds,
end_cds,
f.location.strand,
refseq_product,
refseq_gene,
pseudo_check
])
n+=1
if reseq_translation == 'NA':
continue
## Write CDS to file ##
outfile.write('>{} {}\n'.format(locus_tag, refseq_locus_tag))
outfile.write('{}\n'.format(reseq_translation))
## return genbank features
df_gbf = pd.DataFrame(store_rows,columns=['assembly_id','contig_id','locus_tag','refseq_locus_tag','cds_start','cds_end','strand','refseq_product','refseq_gene','pseudo_check'])
return(df_gbf)
def make_BlastDatabase(assembly_id, assembly_faa_dir, blast_db_path):
'''
'''
full_faa_path = pjoin(assembly_faa_dir,assembly_id+'.faa')
full_db_path = pjoin(blast_db_path,assembly_id)
check_call(['makeblastdb', '-in', full_faa_path, '-out', full_db_path, '-dbtype', 'prot', '-title', '"{}_db"'.format(assembly_id), '-parse_seqids'], stdout=DEVNULL, stderr=STDOUT)
def update_GenomesDatabase():
'''
'''
pass
def check_ForGenomes(genome_inputs_dir):
'''
'''
pass
def build_GenomesDatabase(genomes_list):
'''
'''
pass
## ---- START OF SCRIPT ---- ##
def BuildGenomeDatabase(UserInput_main_dir, UserInput_genome_inputs_dir, UserInput_processes):
'''
'''
# make directory, with subdirectories to store data. and database for genomic data
make_OutputDirectory(new_directory=UserInput_main_dir)
change_ToWorkingDirectory(directory_name=UserInput_main_dir)
make_OutputDirectory(new_directory='blast_database')
make_OutputDirectory(new_directory='assemblies')
conn = sql.connect(pjoin(UserInput_main_dir,'genomes.db')) # genome database holds all base info
##---- Extract organism info ----- ##
if os.path.isfile('genomes.db'):
try:
# if df_meta exists, then check for which files to be processed are not present in df_meta, if any
df_meta = pd.read_sql_query("SELECT assembly_id from gb_metadata",conn)
meta_exists_list = df_meta.assembly_id.tolist()
r_params = [[f, UserInput_genome_inputs_dir] for f in os.listdir(UserInput_genome_inputs_dir) if f.endswith('.gbff') if generate_AssemblyId(input_gbff_file=f) not in meta_exists_list]
except:
# if there is an error reading in df_meta, then it doesn't exist. Therefore, process all input files
r_params = [[f, UserInput_genome_inputs_dir] for f in os.listdir(UserInput_genome_inputs_dir) if f.endswith('.gbff')]
if len(r_params) != 0:
chunk_r_params_input = chunks(l=r_params, n=10)
print('Extracting metadata for {} genomes'.format(len(r_params)))
start_t = time.time()
df_meta = pd.DataFrame()
with Pool(processes=UserInput_processes) as p:
df_meta = pd.concat(p.starmap(extract_GenbankMetadata, r_params[:]))
df_meta.to_sql(name='gb_metadata',con=conn, if_exists='append',index_label='assembly_id',index=False)
print((time.time()-start_t)/60)
else:
print('All files already have metadata extracted')
##---- Extract gene features ----- ##
if os.path.isfile('genomes.db'):
try:
# if features have already been pre-processed, then check which files, if any, need to be processed
df_feat = pd.read_sql_query("SELECT DISTINCT assembly_id from gb_features",conn)
feat_exist_list = df_feat.assembly_id.tolist()
r_params = [[f, UserInput_genome_inputs_dir, 'assemblies'] for f in os.listdir(UserInput_genome_inputs_dir) if f.endswith('.gbff') if generate_AssemblyId(input_gbff_file=f) not in feat_exist_list]
except:
# if there is an error reading in df_feat, then it doesn't exist and all files need to be processed
r_params = [[f, UserInput_genome_inputs_dir, 'assemblies'] for f in os.listdir(UserInput_genome_inputs_dir) if f.endswith('.gbff')]
update_sql_index = False
if len(r_params) != 0:
print('Extracting features for {} genomes'.format(len(r_params)))
start_t = time.time()
chunk_r_params_input = chunks(l=r_params, n=100) # Use a chunking strategy to reduce the memory consumption caused by having large pandas dataframes
for chunk_r in chunk_r_params_input:
df_feat = pd.DataFrame()
import sys
from timeit import default_timer as timer
from hyperopt import hp, tpe, Trials
from hyperopt.fmin import fmin
sys.path.insert(0, '/home/jovyan/anton/power_repos/pg_model')
import csv
import logging
import os
import pickle
from pathlib import Path
import numpy as np
import pandas as pd
from lightgbm import LGBMRegressor
from sklearn.metrics import mean_absolute_error
from hyperopt import base
base.have_bson = False
project_dir = Path(__file__).resolve().parents[2]
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# exclude_cols = ['FinalDrillDate', 'RigReleaseDate', 'SpudDate','UWI']
exclude_cols_oil = ["UWI", "CompletionDate", 'DaysDrilling',
'DrillMetresPerDay',
'GroundElevation',
'HZLength',
'LengthDrill',
'Municipality',
#'Pool',
'SurfaceOwner',
'_Fracture`Stages',
'final_timediff',
'lic_timediff',
'rrd_timediff',
'st_timediff','Confidential','SurfAbandonDate']
exclude_cols_gas = ['ConfidentialReleaseDate', "UWI", "CompletionDate",
'CurrentOperator',
'DaysDrilling',
'DrillMetresPerDay',
'DrillingContractor',
'FinalDrillDate',
'KBElevation',
'LengthDrill',
'LicenceDate',
'Municipality',
'Pool',
'ProjectedDepth',
'RigReleaseDate',
'SpudDate',
'StatusSource',
'SurfaceOwner',
'TVD',
'TotalDepth',
'UnitName',
'_Fracture`Stages',
'cf_timediff',
'final_timediff',
'rrd_timediff',
'st_timediff','Confidential','SurfAbandonDate']
exclude_cols_water = ['ConfidentialReleaseDate',"UWI", "CompletionDate",
'DaysDrilling',
'DrillMetresPerDay',
'FinalDrillDate',
'GroundElevation',
'HZLength',
'KBElevation',
'LaheeClass',
'LicenceDate',
'Licensee',
'ProjectedDepth',
'SpudDate',
'TotalDepth',
'_Fracture`Stages',
'cf_timediff',
'final_timediff',
'lic_timediff',
'rrd_timediff',
'st_timediff','Confidential','SurfAbandonDate']
exclude_cols_dict = {'Oil_norm': exclude_cols_oil,
'Gas_norm': exclude_cols_gas,
'Water_norm': exclude_cols_water}
class LogLGBM(LGBMRegressor):
def fit(self, X, Y, **kwargs):
y_train = np.log(Y)
super(LogLGBM, self).fit(X, y_train, **kwargs)
return self
def predict(self, X):
preds = super(LogLGBM, self).predict(X)
preds = np.exp(preds)
return preds
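# LogLGBM fits the underlying LGBMRegressor on log(y) and exponentiates its
# predictions, i.e. the target is modelled multiplicatively; get_score below still
# reports MAE on the original scale.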
def get_score(model, X, y, X_val, y_val):
model.fit(X, y, eval_set=(X_val, y_val), early_stopping_rounds=50, verbose=0)
score = mean_absolute_error(y_val, model.predict(X_val))
print(score)
return score
def main(input_file_path, tgt='Oil_norm'):
note = "LGBM"
input_file_name = os.path.join(input_file_path, 'Train_final.pck')
input_file_name_val = os.path.join(input_file_path, 'Validation_final.pck')
exclude_cols = exclude_cols_dict[tgt]
df = pd.read_pickle(input_file_name).drop(exclude_cols, axis=1)
df_val = pd.read_pickle(input_file_name_val).drop(exclude_cols, axis=1)
y = df.loc[~df[tgt].isna(), tgt]
X = df.loc[~df[tgt].isna(), :].drop(['Oil_norm', 'Gas_norm', 'Water_norm', 'EPAssetsId', '_Normalized`IP`BOE/d'],
axis=1)
X_holdout = df_val.loc[~df_val[tgt].isna(), :].drop(
['Oil_norm', 'Gas_norm', 'Water_norm', 'EPAssetsId', '_Normalized`IP`BOE/d'],
axis=1)
y_holdout = df_val.loc[~df_val[tgt].isna(), tgt]
# Prep output files for hyperopt for performance tracking:
trials = Trials()
space = {
"min_data_in_leaf": hp.uniform("min_data_in_leaf", 1, 40),
'num_leaves': hp.quniform('num_leaves', 30, 128, 1),
'feature_fraction': hp.uniform('feature_fraction', 0.1, 0.9),
'bagging_fraction': hp.uniform('bagging_fraction', 0.1, 1.0),
'lambda_l1': hp.uniform('lambda_l1', 0.1, 10),
'lambda_l2': hp.uniform('lambda_l2', 0.1, 10)
}
fpath = f'{project_dir}/models/{note}_{tgt}_feats_final_Trials.pkl'
fpath_csv = f'{project_dir}/models/{note}_{tgt}_feats_final_Trials.csv'
# File to save first results
of_connection = open(fpath_csv, 'w')
writer = csv.writer(of_connection)
# Write the headers to the file
writer.writerow(['loss', *list(space.keys()), 'train_time'])
of_connection.close()
def objective(params):
params = {
"min_data_in_leaf": int(params['min_data_in_leaf']),
"num_leaves": int(params['num_leaves']),
"feature_fraction": "{:.3f}".format(params['feature_fraction']),
"bagging_fraction": '{:.3f}'.format(params['bagging_fraction']),
"lambda_l1": params['lambda_l1'],
"lambda_l2": params['lambda_l2']
}
m = LogLGBM(learning_rate=0.05, n_estimators=500,
objective='mse', random_state=123, **params)
start_time = timer()
score = get_score(m, X, y, X_holdout, y_holdout)
run_time = timer() - start_time
# Write to the csv file ('a' means append)
of_connection = open(fpath_csv, 'a')
writer = csv.writer(of_connection)
writer.writerow([score, *list(params.values()), run_time])
of_connection.close()
print("Score {:.3f} params {}".format(score, params))
return score
best_lgbm = fmin(fn=objective,
space=space,
algo=tpe.suggest,
max_evals=250, trials=trials)
losses = [trials.trials[i]['result']['loss'] for i in range(len(trials.trials))]
    params = pd.DataFrame(trials.vals)
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Helper functions for the AWS-Alphafold notebook.
"""
from datetime import datetime
import boto3
import uuid
import sagemaker
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import AlignIO
from Bio.Align import MultipleSeqAlignment
import os
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
import string
from string import ascii_uppercase, ascii_lowercase
import py3Dmol
import json
import re
boto_session = boto3.session.Session()
sm_session = sagemaker.session.Session(boto_session)
region = boto_session.region_name
s3 = boto_session.client("s3", region_name=region)
batch = boto_session.client("batch", region_name=region)
cfn = boto_session.client("cloudformation", region_name=region)
logs_client = boto_session.client("logs")
def create_job_name(suffix=None):
"""
Define a simple job identifier
"""
    if suffix is None:
return datetime.now().strftime("%Y%m%dT%H%M%S")
else:
        ## Ensure that the suffix conforms to the Batch requirements (only letters,
        ## numbers, hyphens, and underscores are allowed).
        suffix = re.sub(r"\W", "_", suffix)
return datetime.now().strftime("%Y%m%dT%H%M%S") + "_" + suffix
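# Illustrative example (the timestamp shown is hypothetical):
#   create_job_name("my target!") -> "20240301T120000_my_target_"
# because re.sub replaces every non-word character (spaces, punctuation) with "_".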
def upload_fasta_to_s3(
sequences,
ids,
bucket=sm_session.default_bucket(),
job_name=uuid.uuid4(),
region="us-east-1",
):
"""
Create a fasta file and upload it to S3.
"""
file_out = "_tmp.fasta"
with open(file_out, "a") as f_out:
for i, seq in enumerate(sequences):
seq_record = SeqRecord(Seq(seq), id=ids[i])
SeqIO.write(seq_record, f_out, "fasta")
object_key = f"{job_name}/{job_name}.fasta"
response = s3.upload_file(file_out, bucket, object_key)
os.remove(file_out)
s3_uri = f"s3://{bucket}/{object_key}"
print(f"Sequence file uploaded to {s3_uri}")
return object_key
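# Note: the `bucket` and `job_name` defaults above are evaluated once at import time, so
# every call that omits them reuses the same bucket name and the same generated UUID.
# Hedged usage sketch (sequence, ID and job name values are assumptions):
#   job_name = create_job_name("demo")
#   object_key = upload_fasta_to_s3(["MKTAYIAKQR"], ["seq1"], job_name=job_name)
#   # -> uploads "<job_name>/<job_name>.fasta" to the default SageMaker bucket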
def list_alphafold_stacks():
af_stacks = []
for stack in cfn.list_stacks(
StackStatusFilter=["CREATE_COMPLETE", "UPDATE_COMPLETE"]
)["StackSummaries"]:
if "Alphafold on AWS Batch" in stack["TemplateDescription"]:
af_stacks.append(stack)
return af_stacks
def get_batch_resources(stack_name):
"""
Get the resource names of the Batch resources for running Alphafold jobs.
"""
# stack_name = af_stacks[0]["StackName"]
stack_resources = cfn.list_stack_resources(StackName=stack_name)
for resource in stack_resources["StackResourceSummaries"]:
if resource["LogicalResourceId"] == "GPUFoldingJobDefinition":
gpu_job_definition = resource["PhysicalResourceId"]
if resource["LogicalResourceId"] == "PrivateGPUJobQueue":
gpu_job_queue = resource["PhysicalResourceId"]
if resource["LogicalResourceId"] == "CPUFoldingJobDefinition":
cpu_job_definition = resource["PhysicalResourceId"]
if resource["LogicalResourceId"] == "PrivateCPUJobQueue":
cpu_job_queue = download_job_queue = resource["PhysicalResourceId"]
if resource["LogicalResourceId"] == "CPUDownloadJobDefinition":
download_job_definition = resource["PhysicalResourceId"]
# if resource["LogicalResourceId"] == "PublicCPUJobQueue":
# download_job_queue = resource["PhysicalResourceId"]
return {
"gpu_job_definition": gpu_job_definition,
"gpu_job_queue": gpu_job_queue,
"cpu_job_definition": cpu_job_definition,
"cpu_job_queue": cpu_job_queue,
"download_job_definition": download_job_definition,
"download_job_queue": download_job_queue,
}
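# Note: get_batch_resources assumes every expected LogicalResourceId is present in the
# stack; if one is missing, its local variable is never bound and the return statement
# raises UnboundLocalError.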
def get_batch_job_info(jobId):
"""
Retrieve and format information about a batch job.
"""
job_description = batch.describe_jobs(jobs=[jobId])
output = {
"jobArn": job_description["jobs"][0]["jobArn"],
"jobName": job_description["jobs"][0]["jobName"],
"jobId": job_description["jobs"][0]["jobId"],
"status": job_description["jobs"][0]["status"],
"createdAt": datetime.utcfromtimestamp(
job_description["jobs"][0]["createdAt"] / 1000
).strftime("%Y-%m-%dT%H:%M:%SZ"),
"dependsOn": job_description["jobs"][0]["dependsOn"],
"tags": job_description["jobs"][0]["tags"],
}
if output["status"] in ["STARTING", "RUNNING", "SUCCEEDED", "FAILED"]:
output["logStreamName"] = job_description["jobs"][0]["container"][
"logStreamName"
]
return output
def get_batch_logs(logStreamName):
"""
Retrieve and format logs for batch job.
"""
try:
response = logs_client.get_log_events(
logGroupName="/aws/batch/job", logStreamName=logStreamName
)
    except logs_client.exceptions.ResourceNotFoundException:
return f"Log stream {logStreamName} does not exist. Please try again in a few minutes"
logs = pd.DataFrame.from_dict(response["events"])
logs.timestamp = logs.timestamp.transform(
lambda x: datetime.fromtimestamp(x / 1000)
)
logs.drop("ingestionTime", axis=1, inplace=True)
return logs
def download_dir(client, bucket, local="data", prefix=""):
"""Recursively download files from S3."""
paginator = client.get_paginator("list_objects_v2")
file_count = 0
for result in paginator.paginate(Bucket=bucket, Delimiter="/", Prefix=prefix):
if result.get("CommonPrefixes") is not None:
for subdir in result.get("CommonPrefixes"):
download_dir(client, bucket, local, subdir.get("Prefix"))
for file in result.get("Contents", []):
dest_pathname = os.path.join(local, file.get("Key"))
if not os.path.exists(os.path.dirname(dest_pathname)):
os.makedirs(os.path.dirname(dest_pathname))
client.download_file(bucket, file.get("Key"), dest_pathname)
file_count += 1
print(f"{file_count} files downloaded from s3.")
return local
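# Note: download_dir recurses into each CommonPrefix ("subdirectory"), and file_count is
# local to each call, so the printed count covers only the current prefix level.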
def download_results(bucket, job_name, local="data"):
"""Download MSA information from S3"""
return download_dir(s3, bucket, local, job_name)
def reduce_stockholm_file(sto_file):
"""Read in a .sto file and parse format it into a numpy array of the
same length as the first (target) sequence
"""
msa = AlignIO.read(sto_file, "stockholm")
msa_arr = np.array([list(rec) for rec in msa])
return msa_arr[:, msa_arr[0, :] != "-"]
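# The returned array has shape (n_sequences, target_length) with one character per cell,
# so downstream coverage plots line up with the residue positions of the query sequence.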
def plot_msa_array(msa_arr, id=None):
total_msa_size = len(msa_arr)
if total_msa_size > 1:
aa_map = {res: i for i, res in enumerate("ABCDEFGHIJKLMNOPQRSTUVWXYZ-")}
msa_arr = np.array([[aa_map[aa] for aa in seq] for seq in msa_arr])
plt.figure(figsize=(12, 3))
plt.title(
f"Per-Residue Count of Non-Gap Amino Acids in the MSA for Sequence {id}"
)
plt.plot(np.sum(msa_arr != aa_map["-"], axis=0), color="black")
plt.ylabel("Non-Gap Count")
plt.yticks(range(0, total_msa_size + 1, max(1, int(total_msa_size / 3))))
return plt
else:
print("Unable to display MSA of length 1")
return None
def plot_msa_folder(msa_folder, id=None):
combined_msa = None
with os.scandir(msa_folder) as it:
for obj in it:
obj_path = os.path.splitext(obj.path)
if "pdb_hits" not in obj_path[0] and obj_path[1] == ".sto":
msa_arr = reduce_stockholm_file(obj.path)
if combined_msa is None:
combined_msa = msa_arr
else:
combined_msa = np.concatenate((combined_msa, msa_arr), axis=0)
if combined_msa is not None:
print(f"Total number of aligned sequences is {len(combined_msa)}")
plot_msa_array(combined_msa, id).show()
return None
else:
return None
def plot_msa_output_folder(path, id=None):
"""Plot MSAs in a folder that may have multiple chain folders"""
plots = []
monomer = True
with os.scandir(path) as it:
for obj in it:
if obj.is_dir():
monomer = False
plot_msa_folder(obj.path, id + " " + obj.name)
if monomer:
plot_msa_folder(path, id)
return None
def display_structure(
pdb_path,
color="lDDT",
show_sidechains=False,
show_mainchains=False,
chains=1,
vmin=50,
vmax=90,
):
"""
Display the predicted structure in a Jupyter notebook cell
"""
if color not in ["chain", "lDDT", "rainbow"]:
raise ValueError("Color must be 'LDDT' (default), 'chain', or 'rainbow'")
plot_pdb(
pdb_path,
show_sidechains=show_sidechains,
show_mainchains=show_mainchains,
color=color,
chains=chains,
vmin=vmin,
vmax=vmax,
).show()
if color == "lDDT":
plot_plddt_legend().show()
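# Note: display_structure relies on plot_pdb() and plot_plddt_legend(), which are assumed
# to be defined elsewhere in the notebook helpers (they are not part of this excerpt).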
def submit_batch_alphafold_job(
job_name,
fasta_paths,
s3_bucket,
is_prokaryote_list=None,
data_dir="/mnt/data_dir/fsx",
output_dir="alphafold",
bfd_database_path="/mnt/bfd_database_path/bfd_metaclust_clu_complete_id30_c90_final_seq.sorted_opt",
mgnify_database_path="/mnt/mgnify_database_path/mgy_clusters_2018_12.fa",
pdb70_database_path="/mnt/pdb70_database_path/pdb70",
obsolete_pdbs_path="/mnt/obsolete_pdbs_path/obsolete.dat",
template_mmcif_dir="/mnt/template_mmcif_dir/mmcif_files",
pdb_seqres_database_path="/mnt/pdb_seqres_database_path/pdb_seqres.txt",
small_bfd_database_path="/mnt/small_bfd_database_path/bfd-first_non_consensus_sequences.fasta",
uniclust30_database_path="/mnt/uniclust30_database_path/uniclust30_2018_08/uniclust30_2018_08",
uniprot_database_path="/mnt/uniprot_database_path/uniprot.fasta",
uniref90_database_path="/mnt/uniref90_database_path/uniref90.fasta",
max_template_date=datetime.now().strftime("%Y-%m-%d"),
db_preset="reduced_dbs",
model_preset="monomer",
benchmark=False,
use_precomputed_msas=False,
features_paths=None,
run_features_only=False,
logtostderr=True,
cpu=4,
memory=16,
gpu=1,
depends_on=None,
stack_name=None,
):
if stack_name is None:
stack_name = list_alphafold_stacks()[0]["StackName"]
batch_resources = get_batch_resources(stack_name)
container_overrides = {
"command": [
f"--fasta_paths={fasta_paths}",
f"--uniref90_database_path={uniref90_database_path}",
f"--mgnify_database_path={mgnify_database_path}",
f"--data_dir={data_dir}",
f"--template_mmcif_dir={template_mmcif_dir}",
f"--obsolete_pdbs_path={obsolete_pdbs_path}",
f"--output_dir={output_dir}",
f"--max_template_date={max_template_date}",
f"--db_preset={db_preset}",
f"--model_preset={model_preset}",
f"--s3_bucket={s3_bucket}",
],
"resourceRequirements": [
{"value": str(cpu), "type": "VCPU"},
{"value": str(memory * 1000), "type": "MEMORY"},
],
}
if model_preset == "multimer":
container_overrides["command"].append(
f"--uniprot_database_path={uniprot_database_path}"
)
container_overrides["command"].append(
f"--pdb_seqres_database_path={pdb_seqres_database_path}"
)
else:
container_overrides["command"].append(
f"--pdb70_database_path={pdb70_database_path}"
)
if db_preset == "reduced_dbs":
container_overrides["command"].append(
f"--small_bfd_database_path={small_bfd_database_path}"
)
else:
container_overrides["command"].append(
f"--uniclust30_database_path={uniclust30_database_path}"
)
container_overrides["command"].append(
f"--bfd_database_path={bfd_database_path}"
)
if is_prokaryote_list is not None:
container_overrides["command"].append(
f"--is_prokaryote_list={is_prokaryote_list}"
)
if benchmark:
container_overrides["command"].append("--benchmark")
if use_precomputed_msas:
container_overrides["command"].append("--use_precomputed_msas")
if features_paths is not None:
container_overrides["command"].append(f"--features_paths={features_paths}")
if run_features_only:
container_overrides["command"].append("--run_features_only")
if logtostderr:
container_overrides["command"].append("--logtostderr")
if gpu > 0:
job_definition = batch_resources["gpu_job_definition"]
job_queue = batch_resources["gpu_job_queue"]
container_overrides["resourceRequirements"].append(
{"value": str(gpu), "type": "GPU"}
)
else:
job_definition = batch_resources["cpu_job_definition"]
job_queue = batch_resources["cpu_job_queue"]
print(container_overrides)
if depends_on is None:
response = batch.submit_job(
jobDefinition=job_definition,
jobName=job_name,
jobQueue=job_queue,
containerOverrides=container_overrides,
)
else:
response = batch.submit_job(
jobDefinition=job_definition,
jobName=job_name,
jobQueue=job_queue,
containerOverrides=container_overrides,
dependsOn=[{"jobId": depends_on, "type": "SEQUENTIAL"}],
)
return response
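# Hedged end-to-end sketch (the sequence, bucket and resource sizes are assumptions, and
# the exact value expected for fasta_paths depends on the Batch job definition):
#   bucket = sm_session.default_bucket()
#   job_name = create_job_name("demo_protein")
#   object_key = upload_fasta_to_s3(["MKTAYIAKQR"], ["demo_protein"],
#                                   bucket=bucket, job_name=job_name)
#   response = submit_batch_alphafold_job(job_name=job_name, fasta_paths=object_key,
#                                         s3_bucket=bucket, db_preset="reduced_dbs", gpu=1)
#   get_batch_job_info(response["jobId"])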
def get_run_metrics(bucket, job_name):
timings_uri = sagemaker.s3.s3_path_join(bucket, job_name, "timings.json")
ranking_uri = sagemaker.s3.s3_path_join(bucket, job_name, "ranking_debug.json")
downloader = sagemaker.s3.S3Downloader()
timing_dict = json.loads(downloader.read_file(f"s3://{timings_uri}"))
ranking_dict = json.loads(downloader.read_file(f"s3://{ranking_uri}"))
timing_df = pd.DataFrame.from_dict(
timing_dict, orient="index", columns=["duration_sec"]
)
ranking_plddts_df = pd.DataFrame.from_dict(
ranking_dict["plddts"], orient="index", columns=["plddts"]
)
    order_df = pd.DataFrame.from_dict(ranking_dict["order"])
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.common as com
from pandas.core import ops
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import (
pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
from pandas.core.strings import StringMethods
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
def cmp_method(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif isinstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op is divmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
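# Both factories return closures whose __name__ is fixed via set_function_name, so the
# resulting methods show up as e.g. "__eq__" or "__add__" when they are later attached to
# Index subclasses as the comparison / arithmetic dunder methods.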
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> pd.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
See Also
---------
RangeIndex : Index implementing a monotonic integer range
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index
IntervalIndex : an Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = set(['str'])
str = CachedAccessor("str", StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, dtype=dtype, copy=copy, name=name,
**kwargs)
# interval
if is_interval_dtype(data) or is_interval_dtype(dtype):
from .interval import IntervalIndex
closed = kwargs.get('closed', None)
return IntervalIndex(data, dtype=dtype, name=name, copy=copy,
closed=closed)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.core.indexes.datetimes import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.core.indexes.timedeltas import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
try:
data = np.array(data, copy=copy, dtype=dtype)
except OverflowError:
# gh-15823: a more user-friendly error message
raise OverflowError(
"the elements provided in the data cannot "
"all be casted to the dtype {dtype}"
.format(dtype=dtype))
elif inferred in ['floating', 'mixed-integer-float']:
if isna(data).any():
raise ValueError('cannot convert float '
'NaN to integer')
# If we are actually all equal to integers,
# then coerce to integer.
try:
return cls._try_convert_to_int_index(
data, copy, name, dtype)
except ValueError:
pass
# Return an actual float index.
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError) as e:
msg = str(e)
if 'cannot convert float' in msg:
raise
# maybe coerce to a sub-class
from pandas.core.indexes.period import (
PeriodIndex, IncompatibleFrequency)
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if is_signed_integer_dtype(data.dtype):
from .numeric import Int64Index
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif is_unsigned_integer_dtype(data.dtype):
from .numeric import UInt64Index
return UInt64Index(data, copy=copy, dtype=dtype, name=name)
elif is_float_dtype(data.dtype):
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
try:
return cls._try_convert_to_int_index(
subarr, copy, name, dtype)
except ValueError:
pass
return Index(subarr, copy=copy,
dtype=object, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'interval':
from .interval import IntervalIndex
return IntervalIndex(subarr, name=name, copy=copy)
elif inferred == 'boolean':
# don't support boolean explicitly ATM
pass
elif inferred != 'string':
if inferred.startswith('datetime'):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from pandas.core.indexes.datetimes import (
DatetimeIndex)
try:
return DatetimeIndex(subarr, copy=copy,
name=name, **kwargs)
except libts.OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
from pandas.core.indexes.timedeltas import (
TimedeltaIndex)
return TimedeltaIndex(subarr, copy=copy, name=name,
**kwargs)
elif inferred == 'period':
try:
return PeriodIndex(subarr, name=name, **kwargs)
except IncompatibleFrequency:
pass
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
if tupleize_cols and is_list_like(data) and data:
if is_iterator(data):
data = list(data)
# we must be all tuples, otherwise don't construct
# 10697
if all(isinstance(e, tuple) for e in data):
from .multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
"""
NOTE for new Index creation:
- _simple_new: It returns new Index with the same type as the caller.
All metadata (such as name) must be provided by caller's responsibility.
Using _shallow_copy is recommended because it fills these metadata
otherwise specified.
- _shallow_copy: It returns new Index with the same type (using
_simple_new), but fills caller's metadata otherwise specified. Passed
kwargs will overwrite corresponding metadata.
- _shallow_copy_with_infer: It returns new Index inferring its type
from passed values. It fills caller's metadata otherwise specified as the
same as _shallow_copy.
See each method's docstring.
"""
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
        we require that we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if (values is None or not len(values)) and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._ndarray_values
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
return result._reset_identity()
_index_shared_docs['_shallow_copy'] = """
create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
return self._simple_new(values, **attributes)
def _shallow_copy_with_infer(self, values=None, **kwargs):
"""
create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
def _deepcopy_if_needed(self, orig, copy=False):
"""
.. versionadded:: 0.19.0
Make a copy of self if data coincides (in memory) with orig.
Subclasses should override this if self._base is not an ndarray.
Parameters
----------
orig : ndarray
other ndarray to compare self._data against
copy : boolean, default False
when False, do not run any check, just return self
Returns
-------
A copy of self if needed, otherwise self : Index
"""
if copy:
# Retrieve the "base objects", i.e. the original memory allocations
if not isinstance(orig, np.ndarray):
# orig is a DatetimeIndex
orig = orig.values
orig = orig if orig.base is None else orig.base
new = self._data if self._data.base is None else self._data.base
if orig is new:
return self.copy(deep=True)
return self
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def _sort_levels_monotonic(self):
""" compat with MultiIndex """
return self
_index_shared_docs['_get_grouper_for_level'] = """
Get index grouper corresponding to an index level
Parameters
----------
mapper: Group mapping function or None
Function mapping index values to groups
level : int or None
Index level
Returns
-------
grouper : Index
Index of values to group on
labels : ndarray of int or None
Array of locations in level_index
uniques : Index or None
Index of unique values for level
"""
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level=None):
assert level is None or level == 0
if mapper is None:
grouper = self
else:
grouper = self.map(mapper)
return grouper, None, None
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
return self
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
if is_bool_dtype(result):
return result
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return str(self.dtype)
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
@property
def _values(self):
# type: () -> Union[ExtensionArray, Index]
# TODO(EA): remove index types as they become extension arrays
"""The best array representation.
This is an ndarray, ExtensionArray, or Index subclass. This differs
from ``_ndarray_values``, which always returns an ndarray.
Both ``_values`` and ``_ndarray_values`` are consistent between
``Series`` and ``Index``.
It may differ from the public '.values' method.
index | values | _values | _ndarray_values |
        ----------------- | -------------- | ----------- | --------------- |
CategoricalIndex | Categorical | Categorical | codes |
DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] |
For the following, the ``._values`` is currently ``ndarray[object]``,
but will soon be an ``ExtensionArray``
index | values | _values | _ndarray_values |
----------------- | --------------- | ------------ | --------------- |
PeriodIndex | ndarray[object] | ndarray[obj] | ndarray[int] |
IntervalIndex | ndarray[object] | ndarray[obj] | ndarray[object] |
See Also
--------
values
_ndarray_values
"""
return self.values
def get_values(self):
"""
Return `Index` data as an `numpy.ndarray`.
Returns
-------
numpy.ndarray
A one-dimensional numpy array of the `Index` values.
See Also
--------
Index.values : The attribute that get_values wraps.
Examples
--------
Getting the `Index` values of a `DataFrame`:
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
... index=['a', 'b', 'c'], columns=['A', 'B', 'C'])
>>> df
A B C
a 1 2 3
b 4 5 6
c 7 8 9
>>> df.index.get_values()
array(['a', 'b', 'c'], dtype=object)
Standalone `Index` values:
>>> idx = pd.Index(['1', '2', '3'])
>>> idx.get_values()
array(['1', '2', '3'], dtype=object)
`MultiIndex` arrays also have only one dimension:
>>> midx = pd.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']],
... names=('number', 'letter'))
>>> midx.get_values()
array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=object)
>>> midx.get_values().ndim
1
"""
return self.values
@Appender(IndexOpsMixin.memory_usage.__doc__)
def memory_usage(self, deep=False):
result = super(Index, self).memory_usage(deep=deep)
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# ops compat
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an Index.
Returns a new index where each element of the current index
is repeated consecutively a given number of times.
Parameters
----------
repeats : int
The number of repetitions for each element.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
pandas.Index
Newly created Index with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series
numpy.repeat : Underlying implementation
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
>>> idx.repeat(2)
Int64Index([1, 1, 2, 2, 3, 3], dtype='int64')
>>> idx.repeat(3)
Int64Index([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype='int64')
"""
nv.validate_repeat(args, kwargs)
return self._shallow_copy(self._values.repeat(repeats))
_index_shared_docs['where'] = """
.. versionadded:: 0.19.0
Return an Index of same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from
other.
Parameters
----------
cond : boolean array-like with the same length as self
other : scalar, or array-like
"""
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
dtype = self.dtype
values = self.values
if is_bool(other) or is_bool_dtype(other):
# bools force casting
values = values.astype(object)
dtype = None
values = np.where(cond, values, other)
if self._is_numeric_dtype and np.any(isna(values)):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return self._shallow_copy_with_infer(values, dtype=dtype)
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
"""
return self._ndarray_values.ravel(order=order)
# construction helpers
@classmethod
def _try_convert_to_int_index(cls, data, copy, name, dtype):
"""
Attempt to convert an array of data into an integer index.
Parameters
----------
data : The data to convert.
copy : Whether to copy the data or not.
name : The name of the index returned.
Returns
-------
int_index : data converted to either an Int64Index or a
UInt64Index
Raises
------
ValueError if the conversion was not successful.
"""
from .numeric import Int64Index, UInt64Index
if not is_unsigned_integer_dtype(dtype):
# skip int64 conversion attempt if uint-like dtype is passed, as
            # this could return Int64Index when UInt64Index is what's desired
try:
res = data.astype('i8', copy=False)
if (res == data).all():
return Int64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
# Conversion to int64 failed (possibly due to overflow) or was skipped,
# so let's try now with uint64.
try:
res = data.astype('u8', copy=False)
if (res == data).all():
return UInt64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
raise ValueError
@classmethod
def _scalar_data_error(cls, data):
raise TypeError('{0}(...) must be called with a collection of some '
'kind, {1} was passed'.format(cls.__name__,
repr(data)))
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays.
"""
if not isinstance(data, (np.ndarray, Index)):
if data is None or is_scalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _get_attributes_dict(self):
""" return an attributes dict for my class """
return {k: getattr(self, k, None) for k in self._attributes}
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, '_typ'):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
dtype = self.dtype
if self._is_numeric_dtype and isna(item):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return Index([item], dtype=dtype, **self._get_attributes_dict())
_index_shared_docs['copy'] = """
Make a copy of this object. Name and dtype sets those attributes on
the new object.
Parameters
----------
name : string, optional
deep : boolean, default False
dtype : numpy dtype or pandas type
Returns
-------
copy : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
@Appender(_index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
if deep:
new_index = self._shallow_copy(self._data.copy())
else:
new_index = self._shallow_copy()
names = kwargs.get('names')
names = self._validate_names(name=name, names=names, deep=deep)
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
def __copy__(self, **kwargs):
return self.copy(**kwargs)
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)
def _validate_names(self, name=None, names=None, deep=False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
return deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
return names
else:
if not is_list_like(name):
return [name]
return name
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
klass = self.__class__.__name__
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
prepr = (u(",%s") %
space).join(u("%s=%s") % (k, v) for k, v in attrs)
# no data provided, just attributes
if data is None:
data = ''
res = u("%s(%s%s)") % (klass, data, prepr)
return res
def _format_space(self):
# using space here controls if the attributes
# are line separated or not (the default)
# max_seq_items = get_option('display.max_seq_items')
# if len(self) > max_seq_items:
# space = "\n%s" % (' ' * (len(klass) + 1))
return " "
@property
def _formatter_func(self):
"""
Return the formatter function
"""
return default_pprint
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string
"""
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
return format_object_summary(self, self._formatter_func,
is_justify=is_justify, name=name)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
return format_object_attrs(self)
def to_series(self, index=None, name=None):
"""
Create a Series with both index and values equal to the index keys
useful with map for returning an indexer based on an index
Parameters
----------
index : Index, optional
index of resulting Series. If None, defaults to original index
name : string, optional
name of resulting Series. If None, defaults to name of original
index
Returns
-------
Series : dtype will be based on the type of the Index values.
"""
from pandas import Series
if index is None:
index = self._shallow_copy()
if name is None:
name = self.name
return Series(self._to_embed(), index=index, name=name)
def to_frame(self, index=True):
"""
Create a DataFrame with a column containing the Index.
.. versionadded:: 0.21.0
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original Index.
Returns
-------
DataFrame
DataFrame containing the original Index data.
See Also
--------
Index.to_series : Convert an Index to a Series.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
>>> idx.to_frame()
animal
animal
Ant Ant
Bear Bear
Cow Cow
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
animal
0 Ant
1 Bear
2 Cow
"""
from pandas import DataFrame
result = DataFrame(self._shallow_copy(), columns=[self.name or 0])
if index:
result.index = self
return result
def _to_embed(self, keep_tz=False, dtype=None):
"""
*this is an internal non-public method*
return an array repr of this object, potentially casting to object
"""
if dtype is not None:
return self.astype(dtype)._to_embed(keep_tz=keep_tz)
return self.values.copy()
_index_shared_docs['astype'] = """
Create an Index with values cast to dtypes. The class of a new Index
is determined by dtype. When conversion is impossible, a ValueError
exception is raised.
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
.. versionadded:: 0.19.0
"""
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_dtype_equal(self.dtype, dtype):
return self.copy() if copy else self
elif is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(self.values, name=self.name, dtype=dtype,
copy=copy)
try:
return Index(self.values.astype(dtype, copy=copy), name=self.name,
dtype=dtype)
except (TypeError, ValueError):
msg = 'Cannot cast {name} to dtype {dtype}'
raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self
def _assert_can_do_setop(self, other):
if not is_list_like(other):
raise TypeError('Input must be Index or array-like')
return True
def _convert_can_do_setop(self, other):
if not isinstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = self.name if self.name == other.name else None
return other, result_name
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
return value
def _assert_can_do_op(self, value):
""" Check value is valid for scalar op """
if not is_scalar(value):
msg = "'value' must be a scalar, passed: {0}"
raise TypeError(msg.format(type(value).__name__))
@property
def nlevels(self):
return 1
def _get_names(self):
return FrozenList((self.name, ))
def _set_names(self, values, level=None):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
Raises
------
TypeError if each name is not hashable.
"""
if not is_list_like(values):
raise ValueError('Names must be a list-like')
if len(values) != 1:
raise ValueError('Length of new names must be 1, got %d' %
len(values))
# GH 20527
# All items in 'name' need to be hashable:
for name in values:
if not is_hashable(name):
raise TypeError('{}.name must be a hashable type'
.format(self.__class__.__name__))
self.name = values[0]
names = property(fset=_set_names, fget=_get_names)
def set_names(self, names, level=None, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
Examples
--------
>>> Index([1, 2, 3, 4]).set_names('foo')
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> Index([1, 2, 3, 4]).set_names(['foo'])
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_names(['baz', 'quz'])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'quz'])
>>> idx.set_names('baz', level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
from .multi import MultiIndex
if level is not None and not isinstance(self, MultiIndex):
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
names):
raise TypeError("Names must be a string")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
return idx
def rename(self, name, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
name : str or list
name to set
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
"""
return self.set_names([name], inplace=inplace)
@property
def _has_complex_internals(self):
# to disable groupby tricks in MultiIndex
return False
def _summary(self, name=None):
"""
Return a summarized representation
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
if len(self) > 0:
head = self[0]
if (hasattr(head, 'format') and
not isinstance(head, compat.string_types)):
head = head.format()
tail = self[-1]
if (hasattr(tail, 'format') and
not isinstance(tail, compat.string_types)):
tail = tail.format()
index_summary = ', %s to %s' % (pprint_thing(head),
pprint_thing(tail))
else:
index_summary = ''
if name is None:
name = type(self).__name__
return '%s: %s entries%s' % (name, len(self), index_summary)
def summary(self, name=None):
"""
Return a summarized representation
.. deprecated:: 0.23.0
"""
warnings.warn("'summary' is deprecated and will be removed in a "
"future version.", FutureWarning, stacklevel=2)
return self._summary(name)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
_na_value = np.nan
"""The expected NA value to use with this index."""
# introspection
@property
def is_monotonic(self):
""" alias for is_monotonic_increasing (deprecated) """
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
Examples
--------
>>> Index([1, 2, 3]).is_monotonic_increasing
True
>>> Index([1, 2, 2]).is_monotonic_increasing
True
>>> Index([1, 3, 2]).is_monotonic_increasing
False
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
Examples
--------
>>> Index([3, 2, 1]).is_monotonic_decreasing
True
>>> Index([3, 2, 2]).is_monotonic_decreasing
True
>>> Index([3, 1, 2]).is_monotonic_decreasing
False
"""
return self._engine.is_monotonic_decreasing
@property
def _is_strictly_monotonic_increasing(self):
"""return if the index is strictly monotonic increasing
(only increasing) values
Examples
--------
>>> Index([1, 2, 3])._is_strictly_monotonic_increasing
True
>>> Index([1, 2, 2])._is_strictly_monotonic_increasing
False
>>> Index([1, 3, 2])._is_strictly_monotonic_increasing
False
"""
return self.is_unique and self.is_monotonic_increasing
@property
def _is_strictly_monotonic_decreasing(self):
"""return if the index is strictly monotonic decreasing
(only decreasing) values
Examples
--------
>>> Index([3, 2, 1])._is_strictly_monotonic_decreasing
True
>>> Index([3, 2, 2])._is_strictly_monotonic_decreasing
False
>>> Index([3, 1, 2])._is_strictly_monotonic_decreasing
False
"""
return self.is_unique and self.is_monotonic_decreasing
def is_lexsorted_for_tuple(self, tup):
return True
@cache_readonly
def is_unique(self):
""" return if the index has unique values """
return self._engine.is_unique
@property
def has_duplicates(self):
return not self.is_unique
def is_boolean(self):
return self.inferred_type in ['boolean']
def is_integer(self):
return self.inferred_type in ['integer']
def is_floating(self):
return self.inferred_type in ['floating', 'mixed-integer-float']
def is_numeric(self):
return self.inferred_type in ['integer', 'floating']
def is_object(self):
return is_object_dtype(self.dtype)
def is_categorical(self):
"""
Check if the Index holds categorical data.
Returns
-------
boolean
True if the Index is categorical.
See Also
--------
CategoricalIndex : Index for categorical data.
Examples
--------
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_categorical()
True
>>> idx = pd.Index([1, 3, 5, 7])
>>> idx.is_categorical()
False
>>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"])
>>> s
0 Peter
1 Victor
2 Elisabeth
3 Mar
dtype: object
>>> s.index.is_categorical()
False
"""
return self.inferred_type in ['categorical']
def is_interval(self):
return self.inferred_type in ['interval']
def is_mixed(self):
return self.inferred_type in ['mixed']
def holds_integer(self):
return self.inferred_type in ['integer', 'mixed-integer']
_index_shared_docs['_convert_scalar_indexer'] = """
Convert a scalar indexer.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
        if len(self) and not isinstance(self, ABCMultiIndex):
# we can raise here if we are definitive that this
# is positional indexing (eg. .ix on with a float)
# or label indexing if we are using a type able
# to be represented in the index
if kind in ['getitem', 'ix'] and is_float(key):
if not self.is_floating():
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_float(key):
# we want to raise KeyError on string/mixed here
# technically we *could* raise a TypeError
# on anything but mixed though
if self.inferred_type not in ['floating',
'mixed-integer-float',
'string',
'unicode',
'mixed']:
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_integer(key):
if not self.holds_integer():
return self._invalid_indexer('label', key)
return key
_index_shared_docs['_convert_slice_indexer'] = """
Convert a slice indexer.
By definition, these are labels unless 'iloc' is passed in.
Floats are not allowed as the start, step, or stop of the slice.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_slice_indexer'])
def _convert_slice_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
# validate iloc
if kind == 'iloc':
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# potentially cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
# figure out if this is a positional indexer
def is_int(v):
return v is None or is_integer(v)
is_null_slicer = start is None and stop is None
is_index_slice = is_int(start) and is_int(stop)
is_positional = is_index_slice and not self.is_integer()
if kind == 'getitem':
"""
called from the getitem slicers, validate that we are in fact
integers
"""
if self.is_integer() or is_index_slice:
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# convert the slice to an indexer here
# if we are mixed and have integers
try:
if is_positional and self.is_mixed():
# TODO: i, j are not used anywhere
if start is not None:
i = self.get_loc(start) # noqa
if stop is not None:
j = self.get_loc(stop) # noqa
is_positional = False
except KeyError:
if self.inferred_type == 'mixed-integer-float':
raise
if is_null_slicer:
indexer = key
elif is_positional:
indexer = key
else:
try:
indexer = self.slice_indexer(start, stop, step, kind=kind)
except Exception:
if is_index_slice:
if self.is_integer():
raise
else:
indexer = key
else:
raise
return indexer
def _convert_listlike_indexer(self, keyarr, kind=None):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
if isinstance(keyarr, Index):
keyarr = self._convert_index_indexer(keyarr)
else:
keyarr = self._convert_arr_indexer(keyarr)
indexer = self._convert_list_indexer(keyarr, kind=kind)
return indexer, keyarr
_index_shared_docs['_convert_arr_indexer'] = """
Convert an array-like indexer to the appropriate dtype.
Parameters
----------
keyarr : array-like
Indexer to convert.
Returns
-------
converted_keyarr : array-like
"""
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
keyarr = com._asarray_tuplesafe(keyarr)
return keyarr
_index_shared_docs['_convert_index_indexer'] = """
Convert an Index indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
Returns
-------
converted_keyarr : Index (or sub-class)
"""
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
return keyarr
_index_shared_docs['_convert_list_indexer'] = """
Convert a list-like indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
kind : iloc, ix, loc, optional
Returns
-------
positional indexer or None
"""
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
if (kind in [None, 'iloc', 'ix'] and
is_integer_dtype(keyarr) and not self.is_floating() and
not isinstance(keyarr, ABCPeriodIndex)):
if self.inferred_type == 'mixed-integer':
indexer = self.get_indexer(keyarr)
if (indexer >= 0).all():
return indexer
# missing values are flagged as -1 by get_indexer and negative
# indices are already converted to positive indices in the
# above if-statement, so the negative flags are changed to
# values outside the range of indices so as to trigger an
# IndexError in maybe_convert_indices
indexer[indexer < 0] = len(self)
from pandas.core.indexing import maybe_convert_indices
return maybe_convert_indices(indexer, len(self))
elif not self.inferred_type == 'integer':
keyarr = np.where(keyarr < 0, len(self) + keyarr, keyarr)
return keyarr
return None
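# Illustrative sketch (assuming a non-integer index such as
# Index(['a', 'b', 'c'])): an integer list indexer like [-1, 0] is wrapped
# via len(self) + keyarr into the positional indexer [2, 0]; on a
# 'mixed-integer' index the values are instead looked up with get_indexer.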
def _invalid_indexer(self, form, key):
""" consistent invalid indexer message """
raise TypeError("cannot do {form} indexing on {klass} with these "
"indexers [{key}] of {kind}".format(
form=form, klass=type(self), key=key,
kind=type(key)))
def get_duplicates(self):
"""
Extract duplicated index elements.
Returns a sorted list of index elements which appear more than once in
the index.
.. deprecated:: 0.23.0
Use idx[idx.duplicated()].unique() instead
Returns
-------
array-like
List of duplicated indexes.
See Also
--------
Index.duplicated : Return boolean array denoting duplicates.
Index.drop_duplicates : Return Index with duplicates removed.
Examples
--------
Works on different types of Index.
>>> pd.Index([1, 2, 2, 3, 3, 3, 4]).get_duplicates()
[2, 3]
>>> pd.Index([1., 2., 2., 3., 3., 3., 4.]).get_duplicates()
[2.0, 3.0]
>>> pd.Index(['a', 'b', 'b', 'c', 'c', 'c', 'd']).get_duplicates()
['b', 'c']
Note that for a DatetimeIndex, it does not return a list but a new
DatetimeIndex:
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03',
... '2018-01-03', '2018-01-04', '2018-01-04'],
... format='%Y-%m-%d')
>>> pd.Index(dates).get_duplicates()
DatetimeIndex(['2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', freq=None)
Sorts duplicated elements even when indexes are unordered.
>>> pd.Index([1, 2, 3, 2, 3, 4, 3]).get_duplicates()
[2, 3]
Return empty array-like structure when all elements are unique.
>>> pd.Index([1, 2, 3, 4]).get_duplicates()
[]
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03'],
... format='%Y-%m-%d')
>>> pd.Index(dates).get_duplicates()
DatetimeIndex([], dtype='datetime64[ns]', freq=None)
"""
warnings.warn("'get_duplicates' is deprecated and will be removed in "
"a future release. You can use "
"idx[idx.duplicated()].unique() instead",
FutureWarning, stacklevel=2)
return self[self.duplicated()].unique()
def _cleanup(self):
self._engine.clear_mapping()
@cache_readonly
def _constructor(self):
return type(self)
@cache_readonly
def _engine(self):
# property, for now, slow to look up
return self._engine_type(lambda: self._ndarray_values, len(self))
def _validate_index_level(self, level):
"""
Validate index level.
For a single-level Index, getting the level number is a no-op, but some
verification must be done, as in MultiIndex.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError("Too many levels: Index has only 1 level,"
" %d is not a valid level number" % (level, ))
elif level > 0:
raise IndexError("Too many levels:"
" Index has only 1 level, not %d" %
(level + 1))
elif level != self.name:
raise KeyError('Level %s must be same as name (%s)' %
(level, self.name))
def _get_level_number(self, level):
self._validate_index_level(level)
return 0
@cache_readonly
def inferred_type(self):
""" return a string of the type inferred from the values """
return lib.infer_dtype(self)
def _is_memory_usage_qualified(self):
""" return a boolean if we need a qualified .info display """
return self.is_object()
def is_type_compatible(self, kind):
return kind == self.inferred_type
@cache_readonly
def is_all_dates(self):
if self._data is None:
return False
return is_datetime_array(_ensure_object(self.values))
def __reduce__(self):
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
self._data = state.pop('data')
for k, v in compat.iteritems(state):
setattr(self, k, v)
elif isinstance(state, tuple):
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
_index_shared_docs['__contains__'] = """
return a boolean if this key is IN the index
Parameters
----------
key : object
Returns
-------
boolean
"""
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
try:
return key in self._engine
except (OverflowError, TypeError, ValueError):
return False
_index_shared_docs['contains'] = """
return a boolean if this key is IN the index
Parameters
----------
key : object
Returns
-------
boolean
"""
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def contains(self, key):
hash(key)
try:
return key in self._engine
except (TypeError, ValueError):
return False
def __hash__(self):
raise TypeError("unhashable type: %r" % type(self).__name__)
def __setitem__(self, key, value):
raise TypeError("Index does not support mutable operations")
def __getitem__(self, key):
"""
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
(ndarrays only support ndarrays with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
"""
# There's no custom logic to be implemented in __getslice__, so it's
# not overloaded intentionally.
getitem = self._data.__getitem__
promote = self._shallow_copy
if is_scalar(key):
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization of basic indexing.
return promote(getitem(key))
if com.is_bool_indexer(key):
key = np.asarray(key)
key = com._values_from_object(key)
result = getitem(key)
if not is_scalar(result):
return promote(result)
else:
return result
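# For illustration (hypothetical index): with idx = Index(['a', 'b', 'c']),
# idx[1] returns the scalar 'b', idx[1:] goes through the slice fast path and
# returns an Index, and idx[[True, False, True]] uses the boolean-indexer
# branch to return Index(['a', 'c']).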
def _can_hold_identifiers_and_holds_name(self, name):
"""
Faster check for ``name in self`` when we know `name` is a Python
identifier (e.g. in NDFrame.__getattr__, which hits this to support
attribute-style key lookup). For indexes that can't hold identifiers (everything
but object & categorical) we just return False.
https://github.com/pandas-dev/pandas/issues/19764
"""
if self.is_object() or self.is_categorical():
return name in self
return False
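# Sketch of the intended use (assumption based on the docstring above):
# DataFrame({'col': [1]}).col works because the object-dtype column index
# reports that it can hold identifiers and contains 'col'; a purely numeric
# column index short-circuits to False without a containment check.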
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if not isinstance(obj, Index):
raise TypeError('all inputs must be Index')
names = {obj.name for obj in to_concat}
name = None if len(names) > 1 else self.name
return self._concat(to_concat, name)
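# Illustrative sketch (hypothetical indexes): Index(['a'], name='x').append(
# Index(['b'], name='x')) keeps the shared name 'x', while appending an index
# with a different name yields a result whose name is None.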
def _concat(self, to_concat, name):
typs = _concat.get_dtype_kinds(to_concat)
if len(typs) == 1:
return self._concat_same_dtype(to_concat, name=name)
return _concat._concat_index_asobject(to_concat, name=name)
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class
"""
# must be overridden in specific classes
return _concat._concat_index_asobject(to_concat, name)
_index_shared_docs['take'] = """
return a new %(klass)s of the values selected by the indices
For internal compatibility with numpy arrays.
Parameters
----------
indices : list
Indices to be taken
axis : int, optional
The axis over which to select values, always 0.
allow_fill : bool, default True
fill_value : scalar, default None
If allow_fill=True and fill_value is not None, indices specified by
-1 are regarded as NA. If the Index doesn't hold NA, a ValueError is raised.
See also
--------
numpy.ndarray.take
"""
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
if kwargs:
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
if self._can_hold_na:
taken = self._assert_take_fillable(self.values, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=self._na_value)
else:
if allow_fill and fill_value is not None:
msg = 'Unable to fill values because {0} cannot contain NA'
raise ValueError(msg.format(self.__class__.__name__))
taken = self.values.take(indices)
return self._shallow_copy(taken)
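# Example sketch (hypothetical index): Index(['a', 'b', 'c']).take([0, -1])
# treats -1 as an ordinary negative position and returns Index(['a', 'c']),
# whereas passing a non-None fill_value (e.g. np.nan on an object index)
# makes -1 mean "missing" and fills it with the index's NA value.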
def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=np.nan):
""" Internal method to handle NA filling of take """
indices = _ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
raise ValueError(msg)
taken = algos.take(values,
indices,
allow_fill=allow_fill,
fill_value=na_value)
else:
taken = values.take(indices)
return taken
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
if self._can_hold_na:
return isna(self)
else:
# shouldn't reach this condition; callers check hasnans beforehand
values = np.empty(len(self), dtype=np.bool_)
values.fill(False)
return values
@cache_readonly
def _nan_idxs(self):
if self._can_hold_na:
w, = self._isnan.nonzero()
return w
else:
return np.array([], dtype=np.int64)
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
if self._can_hold_na:
return self._isnan.any()
else:
return False
def isna(self):
"""
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get
mapped to ``True`` values.
Everything else gets mapped to ``False`` values. Characters such as
empty strings `''` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
.. versionadded:: 0.20.0
Returns
-------
numpy.ndarray
A boolean array indicating whether each value is NA.
See Also
--------
pandas.Index.notna : boolean inverse of isna.
pandas.Index.dropna : omit entries with missing values.
pandas.isna : top-level isna.
Series.isna : detect missing values in Series object.
Examples
--------
Show which entries in a pandas.Index are NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.NaN])
>>> idx
Float64Index([5.2, 6.0, nan], dtype='float64')
>>> idx.isna()
array([False, False, True], dtype=bool)
Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = pd.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.isna()
array([False, False, False, True], dtype=bool)
For datetimes, `NaT` (Not a Time) is considered as an NA value.
>>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'),
... pd.Timestamp(''), None, pd.NaT])
>>> idx
DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]', freq=None)
>>> idx.isna()
array([False, True, True, True], dtype=bool)
"""
return self._isnan
isnull = isna
def notna(self):
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to ``True``. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False``
values.
.. versionadded:: 0.20.0
Returns
-------
numpy.ndarray
Boolean array to indicate which entries are not NA.
See also
--------
Index.notnull : alias of notna
Index.isna: inverse of notna
pandas.notna : top-level notna
Examples
--------
Show which entries in an Index are not NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.NaN])
>>> idx
Float64Index([5.2, 6.0, nan], dtype='float64')
>>> idx.notna()
array([ True, True, False])
Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = pd.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.notna()
array([ True, True, True, False])
"""
return ~self.isna()
notnull = notna
def putmask(self, mask, value):
"""
return a new Index of the values set with the mask
See also
--------
numpy.ndarray.putmask
"""
values = self.values.copy()
try:
np.putmask(values, mask, self._convert_for_op(value))
return self._shallow_copy(values)
except (ValueError, TypeError) as err:
if is_object_dtype(self):
raise err
# coerces to object
return self.astype(object).putmask(mask, value)
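# Illustrative sketch (hypothetical index): Index([1, 2, 3]).putmask(
# [True, False, False], 5) replaces the first value and returns a new
# Index([5, 2, 3]); if the replacement value cannot be held by the current
# dtype, the index is first cast to object and putmask is retried.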
def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index
"""
header = []
if name:
header.append(pprint_thing(self.name,
escape_chars=('\t', '\r', '\n')) if
self.name is not None else '')
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, **kwargs)
def _format_with_header(self, header, na_rep='NaN', **kwargs):
values = self.values
from pandas.io.formats.format import format_array
if is_categorical_dtype(values.dtype):
values = np.array(values)
elif is_object_dtype(values.dtype):
values = lib.maybe_convert_objects(values, safe=1)
if is_object_dtype(values.dtype):
result = [pprint_thing(x, escape_chars=('\t', '\r', '\n'))
for x in values]
# could have nans
mask = isna(values)
if mask.any():
result = np.array(result)
result[mask] = na_rep
result = result.tolist()
else:
result = _trim_front(format_array(values, None, justify='left'))
return header + result
def to_native_types(self, slicer=None, **kwargs):
"""
Format specified values of `self` and return them.
Parameters
----------
slicer : int, array-like
An indexer into `self` that specifies which values
are used in the formatting process.
kwargs : dict
Options for specifying how the values should be formatted.
These options include the following:
1) na_rep : str
The value that serves as a placeholder for NULL values
2) quoting : bool or None
Whether or not there are quoted values in `self`
3) date_format : str
The format used to represent date-like values
"""
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs)
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
""" actually format my specific types """
mask = isna(self)
if not self.is_object() and not quoting:
values = np.asarray(self).astype(str)
else:
values = np.array(self, dtype=object, copy=True)
values[mask] = na_rep
return values
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if is_object_dtype(self) and not is_object_dtype(other):
# if other is not object, use other's logic for coercion
return other.equals(self)
try:
return array_equivalent(com._values_from_object(self),
com._values_from_object(other))
except Exception:
return False
def identical(self, other):
"""Similar to equals, but check that other comparable attributes are
also equal
"""
return (self.equals(other) and
all((getattr(self, c, None) == getattr(other, c, None)
for c in self._comparables)) and
type(self) == type(other))
def asof(self, label):
"""
For a sorted index, return the most recent label up to and including
the passed label. Return NaN if not found.
See also
--------
get_loc : asof is a thin wrapper around get_loc with method='pad'
"""
try:
loc = self.get_loc(label, method='pad')
except KeyError:
return self._na_value
else:
if isinstance(loc, slice):
loc = loc.indices(len(self))[-1]
return self[loc]
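# Example sketch (hypothetical sorted index): Index([10, 20, 30]).asof(25)
# pads back to the most recent label and returns 20, while asof(5) finds no
# earlier label and returns the index's NA value (nan here).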
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
locs = self.values[mask].searchsorted(where.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where < self.values[first])] = -1
return result
def sort_values(self, return_indexer=False, ascending=True):
"""
Return a sorted copy of the index.
Return a sorted copy of the index, and optionally return the indices
that sorted the index itself.
Parameters
----------
return_indexer : bool, default False
Should the indices that would sort the index be returned.
ascending : bool, default True
Should the index values be sorted in an ascending order.
Returns
-------
sorted_index : pandas.Index
Sorted copy of the index.
indexer : numpy.ndarray, optional
The indices that the index itself was sorted by.
See Also
--------
pandas.Series.sort_values : Sort values of a Series.
pandas.DataFrame.sort_values : Sort values in a DataFrame.
Examples
--------
>>> idx = pd.Index([10, 100, 1, 1000])
>>> idx
Int64Index([10, 100, 1, 1000], dtype='int64')
Sort values in ascending order (default behavior).
>>> idx.sort_values()
Int64Index([1, 10, 100, 1000], dtype='int64')
Sort values in descending order, and also get the indices `idx` was
sorted by.
>>> idx.sort_values(ascending=False, return_indexer=True)
(Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))
"""
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
if return_indexer:
return sorted_index, _as
else:
return sorted_index
def sort(self, *args, **kwargs):
raise TypeError("cannot sort an Index object in-place, use "
"sort_values instead")
def sortlevel(self, level=None, ascending=True, sort_remaining=None):
"""
For internal compatibility with the Index API.
Sort the Index. This is for compat with MultiIndex
Parameters
----------
ascending : boolean, default True
False to sort in descending order
level, sort_remaining are compat parameters
Returns
-------
sorted_index : Index
indexer : numpy.ndarray
The integer indices that the index was sorted by.
"""
return self.sort_values(return_indexer=True, ascending=ascending)
def shift(self, periods=1, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.Index
shifted index
See Also
--------
Series.shift : Shift values of Series.
Examples
--------
Put the first 5 month starts of 2011 into an index.
>>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS')
>>> month_starts
DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01',
'2011-05-01'],
dtype='datetime64[ns]', freq='MS')
Shift the index by 10 days.
>>> month_starts.shift(10, freq='D')
DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11',
'2011-05-11'],
dtype='datetime64[ns]', freq=None)
The default value of `freq` is the `freq` attribute of the index,
which is 'MS' (month start) in this example.
>>> month_starts.shift(10)
DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01',
'2012-03-01'],
dtype='datetime64[ns]', freq='MS')
Notes
-----
This method is only implemented for datetime-like index classes,
i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex.
"""
raise NotImplementedError("Not supported for type %s" %
type(self).__name__)
def argsort(self, *args, **kwargs):
"""
Return the integer indices that would sort the index.
Parameters
----------
*args
Passed to `numpy.ndarray.argsort`.
**kwargs
Passed to `numpy.ndarray.argsort`.
Returns
-------
numpy.ndarray
Integer indices that would sort the index if used as
an indexer.
See also
--------
numpy.argsort : Similar method for NumPy arrays.
Index.sort_values : Return sorted copy of Index.
Examples
--------
>>> idx = pd.Index(['b', 'a', 'd', 'c'])
>>> idx
Index(['b', 'a', 'd', 'c'], dtype='object')
>>> order = idx.argsort()
>>> order
array([1, 0, 3, 2])
>>> idx[order]
Index(['a', 'b', 'c', 'd'], dtype='object')
"""
result = self.asi8
if result is None:
result = np.array(self)
return result.argsort(*args, **kwargs)
def __add__(self, other):
return Index(np.array(self) + other)
def __radd__(self, other):
return Index(other + np.array(self))
def __iadd__(self, other):
# alias for __add__
return self + other
def __sub__(self, other):
raise TypeError("cannot perform __sub__ with this index type: "
"{typ}".format(typ=type(self).__name__))
def __and__(self, other):
return self.intersection(other)
def __or__(self, other):
return self.union(other)
def __xor__(self, other):
return self.symmetric_difference(other)
def _get_consensus_name(self, other):
"""
Given two indexes, return the consensus name: the non-None name if only
one is set or the names agree, otherwise None.
Return a new object if we are resetting the name.
"""
if self.name != other.name:
if self.name is None or other.name is None:
name = self.name or other.name
else:
name = None
if self.name != name:
return self._shallow_copy(name=name)
return self
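# Illustration of the rule above: indexes named 'x' and 'x' keep 'x'; 'x'
# paired with an unnamed index keeps 'x'; two different non-None names resolve
# to None, and a shallow copy is returned whenever the name actually changes.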
def union(self, other):
"""
Form the union of two Index objects, sorting if possible.
Parameters
----------
other : Index or array-like
Returns
-------
union : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.union(idx2)
Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if len(other) == 0 or self.equals(other):
return self._get_consensus_name(other)
if len(self) == 0:
return other._get_consensus_name(self)
# TODO: is_dtype_union_equal is a hack around
# 1. buggy set ops with duplicates (GH #13432)
# 2. CategoricalIndex lacking setops (GH #10186)
# Once those are fixed, this workaround can be removed
if not is_dtype_union_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.union(other)
# TODO(EA): setops-refactor, clean all this up
if is_period_dtype(self) or is_datetime64tz_dtype(self):
lvals = self._ndarray_values
else:
lvals = self._values
if is_period_dtype(other) or is_datetime64tz_dtype(other):
rvals = other._ndarray_values
else:
rvals = other._values
if self.is_monotonic and other.is_monotonic:
try:
result = self._outer_indexer(lvals, rvals)[0]
except TypeError:
# incomparable objects
result = list(lvals)
# worth making this faster? a very unusual case
value_set = set(lvals)
result.extend([x for x in rvals if x not in value_set])
else:
indexer = self.get_indexer(other)
indexer, = (indexer == -1).nonzero()
if len(indexer) > 0:
other_diff = algos.take_nd(rvals, indexer,
allow_fill=False)
result = _concat._concat_compat((lvals, other_diff))
try:
lvals[0] < other_diff[0]
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
else:
types = frozenset((self.inferred_type,
other.inferred_type))
if not types & _unsortable_types:
result.sort()
else:
result = lvals
try:
result = np.sort(result)
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
# for subclasses
return self._wrap_union_result(other, result)
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self.__class__(result, name=name)
def intersection(self, other):
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`,
preserving the order of the calling index.
Parameters
----------
other : Index or array-like
Returns
-------
intersection : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.intersection(idx2)
Int64Index([3, 4], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if self.equals(other):
return self._get_consensus_name(other)
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.intersection(other)
# TODO(EA): setops-refactor, clean all this up
if is_period_dtype(self):
lvals = self._ndarray_values
else:
lvals = self._values
if is_period_dtype(other):
rvals = other._ndarray_values
else:
rvals = other._values
if self.is_monotonic and other.is_monotonic:
try:
result = self._inner_indexer(lvals, rvals)[0]
return self._wrap_union_result(other, result)
except TypeError:
pass
try:
indexer = Index(rvals).get_indexer(lvals)
indexer = indexer.take((indexer != -1).nonzero()[0])
except Exception:
# duplicates
indexer = algos.unique1d(
Index(rvals).get_indexer_non_unique(lvals)[0])
indexer = indexer[indexer != -1]
taken = other.take(indexer)
if self.name != other.name:
taken.name = None
return taken
def difference(self, other):
"""
Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
Returns
-------
difference : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.difference(idx2)
Int64Index([1, 2], dtype='int64')
"""
self._assert_can_do_setop(other)
if self.equals(other):
return self._shallow_copy([])
other, result_name = self._convert_can_do_setop(other)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer,
assume_unique=True)
the_diff = this.values.take(label_diff)
try:
the_diff = sorting.safe_sort(the_diff)
except TypeError:
pass
return this._shallow_copy(the_diff, name=result_name, freq=None)
def symmetric_difference(self, other, result_name=None):
"""
Compute the symmetric difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
result_name : str
Returns
-------
symmetric_difference : Index
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
dropped.
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([2, 3, 4, 5])
>>> idx1.symmetric_difference(idx2)
Int64Index([1, 5], dtype='int64')
You can also use the ``^`` operator:
>>> idx1 ^ idx2
Int64Index([1, 5], dtype='int64')
"""
self._assert_can_do_setop(other)
other, result_name_update = self._convert_can_do_setop(other)
if result_name is None:
result_name = result_name_update
this = self._get_unique_index()
other = other._get_unique_index()
indexer = this.get_indexer(other)
# {this} minus {other}
common_indexer = indexer.take((indexer != -1).nonzero()[0])
left_indexer = np.setdiff1d(np.arange(this.size), common_indexer,
assume_unique=True)
left_diff = this.values.take(left_indexer)
# {other} minus {this}
right_indexer = (indexer == -1).nonzero()[0]
right_diff = other.values.take(right_indexer)
the_diff = _concat._concat_compat([left_diff, right_diff])
try:
the_diff = sorting.safe_sort(the_diff)
except TypeError:
pass
attribs = self._get_attributes_dict()
attribs['name'] = result_name
if 'freq' in attribs:
attribs['freq'] = None
return self._shallow_copy_with_infer(the_diff, **attribs)
def _get_unique_index(self, dropna=False):
"""
Returns an index containing unique values.
Parameters
----------
dropna : bool
If True, NaN values are dropped.
Returns
-------
uniques : index
"""
if self.is_unique and not dropna:
return self
values = self.values
if not self.is_unique:
values = self.unique()
if dropna:
try:
if self.hasnans:
values = values[~isna(values)]
except NotImplementedError:
pass
return self._shallow_copy(values)
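# Illustrative sketch (hypothetical index): Index([1, 1, 2, np.nan])
# ._get_unique_index() drops the duplicate 1, and passing dropna=True also
# removes the NaN, returning Index([1.0, 2.0]) in this float case.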
_index_shared_docs['get_loc'] = """
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
tolerance : optional
Maximum distance from index value for inexact matches. The value of
the index at the matching location must satisfy the equation
``abs(index[loc] - key) <= tolerance``.
Tolerance may be a scalar
value, which applies the same tolerance to all values, or
list-like, which applies variable tolerance per element. List-like
includes list, tuple, array, Series, and must be the same size as
the index and its dtype must exactly match the index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
loc : int if unique index, slice if monotonic index, else mask
Examples
--------
>>> unique_index = pd.Index(list('abc'))
>>> unique_index.get_loc('b')
1
>>> monotonic_index = pd.Index(list('abbc'))
>>> monotonic_index.get_loc('b')
slice(1, 3, None)
>>> non_monotonic_index = pd.Index(list('abcb'))
>>> non_monotonic_index.get_loc('b')
array([False, True, False, True], dtype=bool)
"""
@Appender(_index_shared_docs['get_loc'])
def get_loc(self, key, method=None, tolerance=None):
if method is None:
if tolerance is not None:
raise ValueError('tolerance argument only valid if using pad, '
'backfill or nearest lookups')
try:
return self._engine.get_loc(key)
except KeyError:
return self._engine.get_loc(self._maybe_cast_indexer(key))
indexer = self.get_indexer([key], method=method, tolerance=tolerance)
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError('get_loc requires scalar valued input')
loc = indexer.item()
if loc == -1:
raise KeyError(key)
return loc
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
# if we have something that is Index-like, then
# use this, e.g. DatetimeIndex
s = getattr(series, '_values', None)
if isinstance(s, (ExtensionArray, Index)) and is_scalar(key):
# GH 20825
# Unify Index and ExtensionArray treatment
# First try to convert the key to a location
# If that fails, see if key is an integer, and
# try that
try:
iloc = self.get_loc(key)
return s[iloc]
except KeyError:
if is_integer(key):
return s[key]
s = com._values_from_object(series)
k = com._values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
try:
return self._engine.get_value(s, k,
tz=getattr(series.dtype, 'tz', None))
except KeyError as e1:
if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
raise
try:
return libindex.get_value_box(s, key)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# python 3
if is_scalar(key): # pragma: no cover
raise IndexError(key)
raise InvalidIndexError(key)
def set_value(self, arr, key, value):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
self._engine.set_value(com._values_from_object(arr),
com._values_from_object(key), value)
def _get_level_values(self, level):
"""
Return an Index of values for requested level, equal to the length
of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
``self``, as there is only one level in the Index.
See also
--------
pandas.MultiIndex.get_level_values : get values for a level of a
MultiIndex
"""
self._validate_index_level(level)
return self
get_level_values = _get_level_values
def droplevel(self, level=0):
"""
Return index with requested level(s) removed. If resulting index has
only 1 level left, the result will be of Index type, not MultiIndex.
.. versionadded:: 0.23.1 (support for non-MultiIndex)
Parameters
----------
level : int, str, or list-like, default 0
If a string is given, must be the name of a level
If list-like, elements must be names or indexes of levels.
Returns
-------
index : Index or MultiIndex
"""
if not isinstance(level, (tuple, list)):
level = [level]
levnums = sorted(self._get_level_number(lev) for lev in level)[::-1]
if len(level) == 0:
return self
if len(level) >= self.nlevels:
raise ValueError("Cannot remove {} levels from an index with {} "
"levels: at least one level must be "
"left.".format(len(level), self.nlevels))
# The two checks above guarantee that here self is a MultiIndex
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
for i in levnums:
new_levels.pop(i)
new_labels.pop(i)
new_names.pop(i)
if len(new_levels) == 1:
# set nan if needed
mask = new_labels[0] == -1
result = new_levels[0].take(new_labels[0])
if mask.any():
result = result.putmask(mask, np.nan)
result.name = new_names[0]
return result
else:
from .multi import MultiIndex
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
_index_shared_docs['get_indexer'] = """
Compute indexer and mask for new index given the current index. The
indexer should then be used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
limit : int, optional
Maximum number of consecutive labels in ``target`` to match for
inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Examples
--------
>>> indexer = index.get_indexer(new_index)
>>> new_values = cur_values.take(indexer)
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
"""
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = _ensure_index(target)
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, target)
# Treat boolean labels passed to a numeric index as not found. Without
# this fix False and True would be treated as 0 and 1 respectively.
# (GH #16877)
if target.is_boolean() and self.is_numeric():
return _ensure_platform_int(np.repeat(-1, target.size))
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer(ptarget, method=method, limit=limit,
tolerance=tolerance)
if not is_dtype_equal(self.dtype, target.dtype):
this = self.astype(object)
target = target.astype(object)
return this.get_indexer(target, method=method, limit=limit,
tolerance=tolerance)
if not self.is_unique:
raise InvalidIndexError('Reindexing only valid with uniquely'
' valued Index objects')
if method == 'pad' or method == 'backfill':
indexer = self._get_fill_indexer(target, method, limit, tolerance)
elif method == 'nearest':
indexer = self._get_nearest_indexer(target, limit, tolerance)
else:
if tolerance is not None:
raise ValueError('tolerance argument only valid if doing pad, '
'backfill or nearest reindexing')
if limit is not None:
raise ValueError('limit argument only valid if doing pad, '
'backfill or nearest reindexing')
indexer = self._engine.get_indexer(target._ndarray_values)
return _ensure_platform_int(indexer)
def _convert_tolerance(self, tolerance, target):
# override this method on subclasses
tolerance = np.asarray(tolerance)
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError('list-like tolerance size must match '
'target index size')
return tolerance
def _get_fill_indexer(self, target, method, limit=None, tolerance=None):
if self.is_monotonic_increasing and target.is_monotonic_increasing:
method = (self._engine.get_pad_indexer if method == 'pad' else
self._engine.get_backfill_indexer)
indexer = method(target._ndarray_values, limit)
else:
indexer = self._get_fill_indexer_searchsorted(target, method,
limit)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target._ndarray_values,
indexer,
tolerance)
return indexer
def _get_fill_indexer_searchsorted(self, target, method, limit=None):
"""
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets
"""
if limit is not None:
raise ValueError('limit argument for %r method only well-defined '
'if index and target are monotonic' % method)
side = 'left' if method == 'pad' else 'right'
# find exact matches first (this simplifies the algorithm)
indexer = self.get_indexer(target)
nonexact = (indexer == -1)
indexer[nonexact] = self._searchsorted_monotonic(target[nonexact],
side)
if side == 'left':
# searchsorted returns "indices into a sorted array such that,
# if the corresponding elements in v were inserted before the
# indices, the order of a would be preserved".
# Thus, we need to subtract 1 to find values to the left.
indexer[nonexact] -= 1
# This also mapped not found values (values of 0 from
# np.searchsorted) to -1, which conveniently is also our
# sentinel for missing values
else:
# Mark indices to the right of the largest value as not found
indexer[indexer == len(self)] = -1
return indexer
def _get_nearest_indexer(self, target, limit, tolerance):
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
"""
left_indexer = self.get_indexer(target, 'pad', limit=limit)
right_indexer = self.get_indexer(target, 'backfill', limit=limit)
target = np.asarray(target)
left_distances = abs(self.values[left_indexer] - target)
right_distances = abs(self.values[right_indexer] - target)
op = operator.lt if self.is_monotonic_increasing else operator.le
indexer = np.where(op(left_distances, right_distances) |
(right_indexer == -1), left_indexer, right_indexer)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target, indexer,
tolerance)
return indexer
def _filter_indexer_tolerance(self, target, indexer, tolerance):
distance = abs(self.values[indexer] - target)
indexer = np.where(distance <= tolerance, indexer, -1)
return indexer
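# Example sketch (hypothetical index): Index([0, 10, 20]).get_indexer(
# [4, 16], method='nearest') yields [0, 2]; adding tolerance=3 filters both
# matches to -1 because each distance (4) exceeds the tolerance.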
_index_shared_docs['get_indexer_non_unique'] = """
Compute indexer and mask for new index given the current index. The
indexer should then be used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
missing : ndarray of int
An indexer into the target of the values not found.
These correspond to the -1 in the indexer array
"""
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = _ensure_index(target)
if is_categorical(target):
target = target.astype(target.dtype.categories.dtype)
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
if self.is_all_dates:
self = Index(self.asi8)
tgt_values = target.asi8
else:
tgt_values = target._ndarray_values
indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
return _ensure_platform_int(indexer), missing
def get_indexer_for(self, target, **kwargs):
"""
Guaranteed return of an indexer even when non-unique.
This dispatches to get_indexer or get_indexer_non_unique as appropriate.
"""
if self.is_unique:
return self.get_indexer(target, **kwargs)
indexer, _ = self.get_indexer_non_unique(target, **kwargs)
return indexer
def _maybe_promote(self, other):
# A hack, but it works
from pandas.core.indexes.datetimes import DatetimeIndex
if self.inferred_type == 'date' and isinstance(other, DatetimeIndex):
return DatetimeIndex(self), other
elif self.inferred_type == 'boolean':
if not is_object_dtype(self.dtype):
return self.astype('object'), other.astype('object')
return self, other
def groupby(self, values):
"""
Group the index labels by a given array of values.
Parameters
----------
values : array
Values used to determine the groups.
Returns
-------
groups : dict
{group name -> group labels}
"""
# TODO: if we are a MultiIndex, we can do better
# than converting to tuples
from .multi import MultiIndex
if isinstance(values, MultiIndex):
values = values.values
values = _ensure_categorical(values)
result = values._reverse_indexer()
# map to the label
result = {k: self.take(v) for k, v in compat.iteritems(result)}
return result
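# Illustrative sketch (hypothetical values): Index(['a', 'b', 'c', 'd'])
# .groupby(np.array([1, 1, 2, 2])) returns a dict mapping each group key to
# the index labels in that group, e.g. {1: Index(['a', 'b']),
# 2: Index(['c', 'd'])}.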
def map(self, mapper, na_action=None):
"""
Map values using input correspondence (a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping correspondence.
Returns
-------
applied : Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
from .multi import MultiIndex
new_values = super(Index, self)._map_values(
mapper, na_action=na_action)
attributes = self._get_attributes_dict()
# we can return a MultiIndex
if new_values.size and isinstance(new_values[0], tuple):
if isinstance(self, MultiIndex):
names = self.names
elif attributes.get('name'):
names = [attributes.get('name')] * len(new_values[0])
else:
names = None
return MultiIndex.from_tuples(new_values,
names=names)
attributes['copy'] = False
if not new_values.size:
# empty
attributes['dtype'] = self.dtype
return Index(new_values, **attributes)
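# Example sketch (hypothetical mapper): Index([1, 2]).map(str) returns
# Index(['1', '2']), while a mapper returning tuples such as
# lambda x: (x, x * 2) produces a MultiIndex built from those tuples.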
def isin(self, values, level=None):
"""
Return a boolean array where the index values are in `values`.
Compute boolean array of whether each index value is found in the
passed set of values. The length of the returned boolean array matches
the length of the index.
Parameters
----------
values : set or list-like
Sought values.
.. versionadded:: 0.18.1
Support for values as a set.
level : str or int, optional
Name or position of the index level to use (if the index is a
`MultiIndex`).
Returns
-------
is_contained : ndarray
NumPy array of boolean values.
See also
--------
Series.isin : Same for Series.
DataFrame.isin : Same method for DataFrames.
Notes
-----
In the case of `MultiIndex` you must either specify `values` as a
list-like object containing tuples that are the same length as the
number of levels, or specify `level`. Otherwise it will raise a
``ValueError``.
If `level` is specified:
- if it is the name of one *and only one* index level, use that level;
- otherwise it should be a number indicating level position.
Examples
--------
>>> idx = pd.Index([1,2,3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
Check whether each index value is in a list of values.
>>> idx.isin([1, 4])
array([ True, False, False])
>>> midx = pd.MultiIndex.from_arrays([[1,2,3],
... ['red', 'blue', 'green']],
... names=('number', 'color'))
>>> midx
MultiIndex(levels=[[1, 2, 3], ['blue', 'green', 'red']],
labels=[[0, 1, 2], [2, 0, 1]],
names=['number', 'color'])
Check whether the strings in the 'color' level of the MultiIndex
are in a list of colors.
>>> midx.isin(['red', 'orange', 'yellow'], level='color')
array([ True, False, False])
To check across the levels of a MultiIndex, pass a list of tuples:
>>> midx.isin([(1, 'red'), (3, 'red')])
array([ True, False, False])
For a DatetimeIndex, string values in `values` are converted to
Timestamps.
>>> dates = ['2000-03-11', '2000-03-12', '2000-03-13']
>>> dti = pd.to_datetime(dates)
>>> dti
DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'],
dtype='datetime64[ns]', freq=None)
>>> dti.isin(['2000-03-11'])
array([ True, False, False])
"""
if level is not None:
self._validate_index_level(level)
return algos.isin(self, values)
def _can_reindex(self, indexer):
"""
*this is an internal non-public method*
Check if we are allowing reindexing with this particular indexer
Parameters
----------
indexer : an integer indexer
Raises
------
ValueError if its a duplicate axis
"""
# trying to reindex on an axis with duplicates
if not self.is_unique and len(indexer):
raise ValueError("cannot reindex from a duplicate axis")
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'name')
# GH7774: preserve dtype/tz if target is empty and not an Index.
target = _ensure_has_len(target) # target may be an iterator
if not isinstance(target, Index) and len(target) == 0:
attrs = self._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = self._simple_new(None, dtype=self.dtype, **attrs)
else:
target = _ensure_index(target)
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
_, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True)
else:
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit,
tolerance=tolerance)
else:
if method is not None or limit is not None:
raise ValueError("cannot reindex a non-unique index "
"with a method or limit")
indexer, missing = self.get_indexer_non_unique(target)
if preserve_names and target.nlevels == 1 and target.name != self.name:
target = target.copy()
target.name = self.name
return target, indexer
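# Illustrative sketch (hypothetical target): Index(['a', 'b', 'c']).reindex(
# ['b', 'd']) returns the new Index(['b', 'd']) together with the positional
# indexer [1, -1], where -1 marks a label missing from the original index.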
def _reindex_non_unique(self, target):
"""
*this is an internal non-public method*
Create a new index with target's values (move/add/delete values as
necessary) use with non-unique Index and a possibly non-unique target
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
target = _ensure_index(target)
indexer, missing = self.get_indexer_non_unique(target)
check = indexer != -1
new_labels = self.take(indexer[check])
new_indexer = None
if len(missing):
length = np.arange(len(indexer))
missing = _ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = _ensure_int64(length[~check])
cur_labels = self.take(indexer[check]).values
cur_indexer = _ensure_int64(length[check])
new_labels = np.empty(tuple([len(indexer)]), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# a unique indexer
if target.is_unique:
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer))
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
# indexer here
else:
# need to retake to have the same size as the indexer
indexer[~check] = 0
# reset the new indexer to account for the new size
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[~check] = -1
new_index = self._shallow_copy_with_infer(new_labels, freq=None)
return new_index, indexer, new_indexer
_index_shared_docs['join'] = """
*this is an internal non-public method*
Compute join_index and indexers to conform data
structures to the new index.
Parameters
----------
other : Index
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
return_indexers : boolean, default False
sort : boolean, default False
Sort the join keys lexicographically in the result Index. If False,
the order of the join keys depends on the join type (how keyword)
.. versionadded:: 0.20.0
Returns
-------
join_index, (left_indexer, right_indexer)
"""
@Appender(_index_shared_docs['join'])
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# try to figure out the join level
# GH3662
if level is None and (self_is_mi or other_is_mi):
# have the same levels/names so a simple join
if self.names == other.names:
pass
else:
return self._join_multi(other, how=how,
return_indexers=return_indexers)
# join on the level
if level is not None and (self_is_mi or other_is_mi):
return self._join_level(other, level, how=how,
return_indexers=return_indexers)
other = _ensure_index(other)
if len(other) == 0 and how in ('left', 'outer'):
join_index = self._shallow_copy()
if return_indexers:
rindexer = np.repeat(-1, len(join_index))
return join_index, None, rindexer
else:
return join_index
if len(self) == 0 and how in ('right', 'outer'):
join_index = other._shallow_copy()
if return_indexers:
lindexer = np.repeat(-1, len(join_index))
return join_index, lindexer, None
else:
return join_index
if self._join_precedence < other._join_precedence:
how = {'right': 'left', 'left': 'right'}.get(how, how)
result = other.join(self, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
x, y, z = result
result = x, z, y
return result
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.join(other, how=how, return_indexers=return_indexers)
_validate_join_method(how)
if not self.is_unique and not other.is_unique:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif not self.is_unique or not other.is_unique:
if self.is_monotonic and other.is_monotonic:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
else:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif self.is_monotonic and other.is_monotonic:
try:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
except TypeError:
pass
if how == 'left':
join_index = self
elif how == 'right':
join_index = other
elif how == 'inner':
join_index = self.intersection(other)
elif how == 'outer':
join_index = self.union(other)
if sort:
join_index = join_index.sort_values()
if return_indexers:
if join_index is self:
lindexer = None
else:
lindexer = self.get_indexer(join_index)
if join_index is other:
rindexer = None
else:
rindexer = other.get_indexer(join_index)
return join_index, lindexer, rindexer
else:
return join_index
def _join_multi(self, other, how, return_indexers=True):
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# figure out join names
self_names = com._not_none(*self.names)
other_names = com._not_none(*other.names)
overlap = list(set(self_names) & set(other_names))
# need at least 1 in common, but not more than 1
if not len(overlap):
raise ValueError("cannot join with no level specified and no "
"overlapping names")
if len(overlap) > 1:
raise NotImplementedError("merging with more than one level "
"overlap on a multi-index is not "
"implemented")
jl = overlap[0]
# make the indices into mi's that match
if not (self_is_mi and other_is_mi):
flip_order = False
if self_is_mi:
self, other = other, self
flip_order = True
# flip if join method is right or left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = other.names.index(jl)
result = self._join_level(other, level, how=how,
return_indexers=return_indexers)
if flip_order:
if isinstance(result, tuple):
return result[0], result[2], result[1]
return result
# 2 multi-indexes
raise NotImplementedError("merging with both multi-indexes is not "
"implemented")
def _join_non_unique(self, other, how='left', return_indexers=False):
from pandas.core.reshape.merge import _get_join_indexers
left_idx, right_idx = _get_join_indexers([self._ndarray_values],
[other._ndarray_values],
how=how,
sort=True)
left_idx = _ensure_platform_int(left_idx)
right_idx = _ensure_platform_int(right_idx)
join_index = np.asarray(self._ndarray_values.take(left_idx))
mask = left_idx == -1
np.putmask(join_index, mask, other._ndarray_values.take(right_idx))
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, left_idx, right_idx
else:
return join_index
def _join_level(self, other, level, how='left', return_indexers=False,
keep_order=True):
"""
The join method *only* affects the level of the resulting
MultiIndex. Otherwise it just exactly aligns the Index data to the
labels of the level in the MultiIndex. If `keep_order` == True, the
order of the data indexed by the MultiIndex will not be changed;
otherwise, it will tie out with `other`.
"""
from .multi import MultiIndex
def _get_leaf_sorter(labels):
"""
Returns a sorter for the innermost level while preserving the
order of the higher levels.
"""
if labels[0].size == 0:
return np.empty(0, dtype='int64')
if len(labels) == 1:
lab = _ensure_int64(labels[0])
sorter, _ = libalgos.groupsort_indexer(lab, 1 + lab.max())
return sorter
# find indexers of beginning of each set of
# same-key labels w.r.t all but last level
tic = labels[0][:-1] != labels[0][1:]
for lab in labels[1:-1]:
tic |= lab[:-1] != lab[1:]
starts = np.hstack(([True], tic, [True])).nonzero()[0]
lab = _ensure_int64(labels[-1])
return lib.get_level_sorter(lab, _ensure_int64(starts))
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
raise TypeError('Join on level between two MultiIndex objects '
'is ambiguous')
left, right = self, other
flip_order = not isinstance(self, MultiIndex)
if flip_order:
left, right = right, left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = left._get_level_number(level)
old_level = left.levels[level]
if not right.is_unique:
raise NotImplementedError('Index._join_level on non-unique index '
'is not implemented')
new_level, left_lev_indexer, right_lev_indexer = \
old_level.join(right, how=how, return_indexers=True)
if left_lev_indexer is None:
if keep_order or len(left) == 0:
left_indexer = None
join_index = left
else: # sort the leaves
left_indexer = _get_leaf_sorter(left.labels[:level + 1])
join_index = left[left_indexer]
else:
left_lev_indexer = _ensure_int64(left_lev_indexer)
rev_indexer = lib.get_reverse_indexer(left_lev_indexer,
len(old_level))
new_lev_labels = algos.take_nd(rev_indexer, left.labels[level],
allow_fill=False)
new_labels = list(left.labels)
new_labels[level] = new_lev_labels
new_levels = list(left.levels)
new_levels[level] = new_level
if keep_order:  # just drop missing values; otherwise keep order
left_indexer = np.arange(len(left), dtype=np.intp)
mask = new_lev_labels != -1
if not mask.all():
new_labels = [lab[mask] for lab in new_labels]
left_indexer = left_indexer[mask]
else: # tie out the order with other
if level == 0: # outer most level, take the fast route
ngroups = 1 + new_lev_labels.max()
left_indexer, counts = libalgos.groupsort_indexer(
new_lev_labels, ngroups)
# missing values are placed first; drop them!
left_indexer = left_indexer[counts[0]:]
new_labels = [lab[left_indexer] for lab in new_labels]
else: # sort the leaves
mask = new_lev_labels != -1
mask_all = mask.all()
if not mask_all:
new_labels = [lab[mask] for lab in new_labels]
left_indexer = _get_leaf_sorter(new_labels[:level + 1])
new_labels = [lab[left_indexer] for lab in new_labels]
# left_indexers are w.r.t masked frame.
# reverse to original frame!
if not mask_all:
left_indexer = mask.nonzero()[0][left_indexer]
join_index = MultiIndex(levels=new_levels, labels=new_labels,
names=left.names, verify_integrity=False)
if right_lev_indexer is not None:
right_indexer = algos.take_nd(right_lev_indexer,
join_index.labels[level],
allow_fill=False)
else:
right_indexer = join_index.labels[level]
if flip_order:
left_indexer, right_indexer = right_indexer, left_indexer
if return_indexers:
left_indexer = (None if left_indexer is None
else _ensure_platform_int(left_indexer))
right_indexer = (None if right_indexer is None
else _ensure_platform_int(right_indexer))
return join_index, left_indexer, right_indexer
else:
return join_index
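    # Editor's note (illustrative, not part of the original source): _join_level
    # backs the public ``Index.join(other, level=...)`` path when a flat Index is
    # aligned against one level of a MultiIndex. A hedged sketch:
    # >>> import pandas as pd
    # >>> mi = pd.MultiIndex.from_product([['a', 'b'], [1, 2]])
    # >>> flat = pd.Index(['a', 'b'])
    # >>> joined = mi.join(flat, how='inner', level=0)
    # >>> isinstance(joined, pd.MultiIndex)
    # True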
def _join_monotonic(self, other, how='left', return_indexers=False):
if self.equals(other):
ret_index = other if how == 'right' else self
if return_indexers:
return ret_index, None, None
else:
return ret_index
sv = self._ndarray_values
ov = other._ndarray_values
if self.is_unique and other.is_unique:
# We can perform much better than the general case
if how == 'left':
join_index = self
lidx = None
ridx = self._left_indexer_unique(sv, ov)
elif how == 'right':
join_index = other
lidx = self._left_indexer_unique(ov, sv)
ridx = None
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
else:
if how == 'left':
join_index, lidx, ridx = self._left_indexer(sv, ov)
elif how == 'right':
join_index, ridx, lidx = self._left_indexer(ov, sv)
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
lidx = None if lidx is None else _ensure_platform_int(lidx)
ridx = None if ridx is None else _ensure_platform_int(ridx)
return join_index, lidx, ridx
else:
return join_index
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Index(joined, name=name)
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
# this is for partial string indexing,
# overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
raise NotImplementedError
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
For an ordered or unique index, compute the slice indexer for input
labels and step.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, default None
kind : string, default None
Returns
-------
indexer : slice
Raises
------
KeyError : If key does not exist, or key is not unique and index is
not ordered.
Notes
-----
This function assumes that the data is sorted, so use at your own peril
Examples
        --------
This is a method on all index types. For example you can do:
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_indexer(start='b', end='c')
slice(1, 3)
>>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])
>>> idx.slice_indexer(start='b', end=('c', 'g'))
slice(1, 3)
"""
start_slice, end_slice = self.slice_locs(start, end, step=step,
kind=kind)
# return a slice
if not is_scalar(start_slice):
raise AssertionError("Start slice bound is non-scalar")
if not is_scalar(end_slice):
raise AssertionError("End slice bound is non-scalar")
return slice(start_slice, end_slice, step)
def _maybe_cast_indexer(self, key):
"""
If we have a float key and are not a floating index
then try to cast to an int if equivalent
"""
if is_float(key) and not self.is_floating():
try:
ckey = int(key)
if ckey == key:
key = ckey
except (OverflowError, ValueError, TypeError):
pass
return key
def _validate_indexer(self, form, key, kind):
"""
if we are positional indexer
validate that we have appropriate typed bounds
must be an integer
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc']
if key is None:
pass
elif is_integer(key):
pass
elif kind in ['iloc', 'getitem']:
self._invalid_indexer(form, key)
return key
_index_shared_docs['_maybe_cast_slice_bound'] = """
This function should be overloaded in subclasses that allow non-trivial
casting on label-slice bounds, e.g. datetime-like indices allowing
strings containing formatted datetimes.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
@Appender(_index_shared_docs['_maybe_cast_slice_bound'])
def _maybe_cast_slice_bound(self, label, side, kind):
assert kind in ['ix', 'loc', 'getitem', None]
# We are a plain index here (sub-class override this method if they
# wish to have special treatment for floats/ints, e.g. Float64Index and
# datetimelike Indexes
# reject them
if is_float(label):
if not (kind in ['ix'] and (self.holds_integer() or
self.is_floating())):
self._invalid_indexer('slice', label)
# we are trying to find integer bounds on a non-integer based index
# this is rejected (generally .loc gets you here)
elif is_integer(label):
self._invalid_indexer('slice', label)
return label
def _searchsorted_monotonic(self, label, side='left'):
if self.is_monotonic_increasing:
return self.searchsorted(label, side=side)
elif self.is_monotonic_decreasing:
# np.searchsorted expects ascending sort order, have to reverse
# everything for it to work (element ordering, search side and
# resulting value).
pos = self[::-1].searchsorted(label, side='right' if side == 'left'
else 'left')
return len(self) - pos
raise ValueError('index must be monotonic increasing or decreasing')
def _get_loc_only_exact_matches(self, key):
"""
This is overridden on subclasses (namely, IntervalIndex) to control
get_slice_bound.
"""
return self.get_loc(key)
def get_slice_bound(self, label, side, kind):
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
"""
assert kind in ['ix', 'loc', 'getitem', None]
if side not in ('left', 'right'):
raise ValueError("Invalid value for side kwarg,"
" must be either 'left' or 'right': %s" %
(side, ))
original_label = label
# For datetime indices label may be a string that has to be converted
# to datetime boundary according to its resolution.
label = self._maybe_cast_slice_bound(label, side, kind)
# we need to look up the label
try:
slc = self._get_loc_only_exact_matches(label)
except KeyError as err:
try:
return self._searchsorted_monotonic(label, side)
except ValueError:
# raise the original KeyError
raise err
if isinstance(slc, np.ndarray):
# get_loc may return a boolean array or an array of indices, which
# is OK as long as they are representable by a slice.
if is_bool_dtype(slc):
slc = lib.maybe_booleans_to_slice(slc.view('u1'))
else:
slc = lib.maybe_indices_to_slice(slc.astype('i8'), len(self))
if isinstance(slc, np.ndarray):
raise KeyError("Cannot get %s slice bound for non-unique "
"label: %r" % (side, original_label))
if isinstance(slc, slice):
if side == 'left':
return slc.start
else:
return slc.stop
else:
if side == 'right':
return slc + 1
else:
return slc
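    # Illustrative example (editor's addition): for a sorted, unique Index the
    # bound comes straight from get_loc, so under the signature above:
    # >>> import pandas as pd
    # >>> idx = pd.Index(list('abcd'))
    # >>> idx.get_slice_bound('b', side='left', kind='loc')
    # 1
    # >>> idx.get_slice_bound('b', side='right', kind='loc')
    # 2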
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, defaults None
If None, defaults to 1
kind : {'ix', 'loc', 'getitem'} or None
Returns
-------
start, end : int
Notes
-----
This method only works if the index is monotonic or unique.
Examples
        --------
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_locs(start='b', end='c')
(1, 3)
See Also
--------
Index.get_loc : Get location for a single label
"""
inc = (step is None or step >= 0)
if not inc:
# If it's a reverse slice, temporarily swap bounds.
start, end = end, start
start_slice = None
if start is not None:
start_slice = self.get_slice_bound(start, 'left', kind)
if start_slice is None:
start_slice = 0
end_slice = None
if end is not None:
end_slice = self.get_slice_bound(end, 'right', kind)
if end_slice is None:
end_slice = len(self)
if not inc:
# Bounds at this moment are swapped, swap them back and shift by 1.
#
# slice_locs('B', 'A', step=-1): s='B', e='A'
#
# s='A' e='B'
# AFTER SWAP: | |
# v ------------------> V
# -----------------------------------
# | | |A|A|A|A| | | | | |B|B| | | | |
# -----------------------------------
# ^ <------------------ ^
# SHOULD BE: | |
# end=s-1 start=e-1
#
end_slice, start_slice = start_slice - 1, end_slice - 1
# i == -1 triggers ``len(self) + i`` selection that points to the
# last element, not before-the-first one, subtracting len(self)
# compensates that.
if end_slice == -1:
end_slice -= len(self)
if start_slice == -1:
start_slice -= len(self)
return start_slice, end_slice
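    # Illustrative example (editor's addition) of the bound-swapping logic above
    # for a reverse slice:
    # >>> import pandas as pd
    # >>> idx = pd.Index(list('abcd'))
    # >>> idx.slice_locs(start='c', end='b', step=-1)
    # (2, 0)
    # >>> idx[slice(2, 0, -1)].tolist()
    # ['c', 'b']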
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return self._shallow_copy(np.delete(self._data, loc))
def insert(self, loc, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
"""
if is_scalar(item) and isna(item):
# GH 18295
item = self._na_value
_self = np.asarray(self)
item = self._coerce_scalar_to_index(item)._ndarray_values
idx = np.concatenate((_self[:loc], item, _self[loc:]))
return self._shallow_copy_with_infer(idx)
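    # Illustrative example (editor's addition), including the list-like handling
    # of negative locations described in the docstring above:
    # >>> import pandas as pd
    # >>> pd.Index([1, 2, 4]).insert(2, 3).tolist()
    # [1, 2, 3, 4]
    # >>> pd.Index(['a', 'c']).insert(-1, 'b').tolist()
    # ['a', 'b', 'c']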
def drop(self, labels, errors='raise'):
"""
Make new Index with passed list of labels deleted
Parameters
----------
labels : array-like
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : Index
Raises
------
KeyError
If none of the labels are found in the selected axis
"""
arr_dtype = 'object' if self.dtype == 'object' else None
labels = com._index_labels_to_array(labels, dtype=arr_dtype)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != 'ignore':
raise KeyError(
'labels %s not contained in axis' % labels[mask])
indexer = indexer[~mask]
return self.delete(indexer)
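    # Illustrative example (editor's addition): missing labels raise KeyError
    # unless errors='ignore' is passed, as documented above.
    # >>> import pandas as pd
    # >>> idx = pd.Index(['a', 'b', 'c'])
    # >>> idx.drop(['b']).tolist()
    # ['a', 'c']
    # >>> idx.drop(['z'], errors='ignore').tolist()
    # ['a', 'b', 'c']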
_index_shared_docs['index_unique'] = (
"""
Return unique values in the index. Uniques are returned in order
of appearance, this does NOT sort.
Parameters
----------
level : int or str, optional, default None
Only return values from specified level (for MultiIndex)
.. versionadded:: 0.23.0
Returns
-------
Index without duplicates
See Also
--------
unique
Series.unique
""")
@Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs)
def unique(self, level=None):
if level is not None:
self._validate_index_level(level)
result = super(Index, self).unique()
return self._shallow_copy(result)
def drop_duplicates(self, keep='first'):
"""
Return Index with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
Returns
-------
deduplicated : Index
See Also
--------
Series.drop_duplicates : equivalent method on Series
DataFrame.drop_duplicates : equivalent method on DataFrame
Index.duplicated : related method on Index, indicating duplicate
Index values.
Examples
--------
        Generate a pandas.Index with duplicate values.
>>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])
The `keep` parameter controls which duplicate values are removed.
The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> idx.drop_duplicates(keep='first')
Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object')
The value 'last' keeps the last occurrence for each set of duplicated
entries.
>>> idx.drop_duplicates(keep='last')
Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object')
The value ``False`` discards all sets of duplicated entries.
>>> idx.drop_duplicates(keep=False)
Index(['cow', 'beetle', 'hippo'], dtype='object')
"""
return super(Index, self).drop_duplicates(keep=keep)
def duplicated(self, keep='first'):
"""
Indicate duplicate index values.
Duplicated values are indicated as ``True`` values in the resulting
array. Either all duplicates, all except the first, or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
The value or values in a set of duplicates to mark as missing.
- 'first' : Mark duplicates as ``True`` except for the first
occurrence.
- 'last' : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set to False and all others to True:
>>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama'])
>>> idx.duplicated()
array([False, False, True, False, True])
which is equivalent to
>>> idx.duplicated(keep='first')
array([False, False, True, False, True])
By using 'last', the last occurrence of each set of duplicated values
        is set to False and all others to True:
>>> idx.duplicated(keep='last')
array([ True, False, True, False, False])
        By setting keep to ``False``, all duplicates are True:
>>> idx.duplicated(keep=False)
array([ True, False, True, False, True])
Returns
-------
numpy.ndarray
See Also
--------
pandas.Series.duplicated : Equivalent method on pandas.Series
pandas.DataFrame.duplicated : Equivalent method on pandas.DataFrame
pandas.Index.drop_duplicates : Remove duplicate values from Index
"""
return super(Index, self).duplicated(keep=keep)
_index_shared_docs['fillna'] = """
Fill NA/NaN values with the specified value
Parameters
----------
value : scalar
Scalar value to use to fill holes (e.g. 0).
This value cannot be a list-likes.
downcast : dict, default is None
a dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible)
Returns
-------
filled : %(klass)s
"""
@Appender(_index_shared_docs['fillna'])
def fillna(self, value=None, downcast=None):
self._assert_can_do_op(value)
if self.hasnans:
result = self.putmask(self._isnan, value)
if downcast is None:
# no need to care metadata other than name
                # because it can't have freq if it has NaNs
return Index(result, name=self.name)
return self._shallow_copy()
_index_shared_docs['dropna'] = """
Return Index without NA/NaN values
Parameters
----------
how : {'any', 'all'}, default 'any'
If the Index is a MultiIndex, drop the value when any or all levels
are NaN.
Returns
-------
valid : Index
"""
@Appender(_index_shared_docs['dropna'])
def dropna(self, how='any'):
if how not in ('any', 'all'):
raise ValueError("invalid how option: {0}".format(how))
if self.hasnans:
return self._shallow_copy(self.values[~self._isnan])
return self._shallow_copy()
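    # Illustrative example (editor's addition) for fillna/dropna above:
    # >>> import numpy as np
    # >>> import pandas as pd
    # >>> idx = pd.Index([1.0, np.nan, 3.0])
    # >>> idx.fillna(2.0).tolist()
    # [1.0, 2.0, 3.0]
    # >>> idx.dropna().tolist()
    # [1.0, 3.0]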
def _evaluate_with_timedelta_like(self, other, op):
# Timedelta knows how to operate with np.array, so dispatch to that
# operation and then wrap the results
other = Timedelta(other)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op == divmod:
return Index(result[0], **attrs), Index(result[1], **attrs)
return Index(result, **attrs)
def _evaluate_with_datetime_like(self, other, op):
raise TypeError("can only perform ops with datetime like values")
def _evaluate_compare(self, other, op):
raise com.AbstractMethodError(self)
@classmethod
def _add_comparison_methods(cls):
""" add in comparison methods """
cls.__eq__ = _make_comparison_op(operator.eq, cls)
cls.__ne__ = _make_comparison_op(operator.ne, cls)
cls.__lt__ = _make_comparison_op(operator.lt, cls)
cls.__gt__ = _make_comparison_op(operator.gt, cls)
cls.__le__ = _make_comparison_op(operator.le, cls)
cls.__ge__ = _make_comparison_op(operator.ge, cls)
@classmethod
def _add_numeric_methods_add_sub_disabled(cls):
""" add in the numeric add/sub methods to disable """
cls.__add__ = make_invalid_op('__add__')
cls.__radd__ = make_invalid_op('__radd__')
cls.__iadd__ = make_invalid_op('__iadd__')
cls.__sub__ = make_invalid_op('__sub__')
cls.__rsub__ = make_invalid_op('__rsub__')
cls.__isub__ = make_invalid_op('__isub__')
@classmethod
def _add_numeric_methods_disabled(cls):
""" add in numeric methods to disable other than add/sub """
cls.__pow__ = make_invalid_op('__pow__')
cls.__rpow__ = make_invalid_op('__rpow__')
cls.__mul__ = make_invalid_op('__mul__')
cls.__rmul__ = make_invalid_op('__rmul__')
cls.__floordiv__ = make_invalid_op('__floordiv__')
cls.__rfloordiv__ = make_invalid_op('__rfloordiv__')
cls.__truediv__ = make_invalid_op('__truediv__')
cls.__rtruediv__ = make_invalid_op('__rtruediv__')
if not compat.PY3:
cls.__div__ = make_invalid_op('__div__')
cls.__rdiv__ = make_invalid_op('__rdiv__')
cls.__mod__ = make_invalid_op('__mod__')
cls.__divmod__ = make_invalid_op('__divmod__')
cls.__neg__ = make_invalid_op('__neg__')
cls.__pos__ = make_invalid_op('__pos__')
cls.__abs__ = make_invalid_op('__abs__')
cls.__inv__ = make_invalid_op('__inv__')
def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
return attrs
def _validate_for_numeric_unaryop(self, op, opstr):
""" validate if we can perform a numeric unary operation """
if not self._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op "
"{opstr} for type: {typ}"
.format(opstr=opstr, typ=type(self).__name__))
def _validate_for_numeric_binop(self, other, op):
"""
return valid other, evaluate or raise TypeError
if we are not of the appropriate type
internal method called by ops
"""
opstr = '__{opname}__'.format(opname=op.__name__)
# if we are an inheritor of numeric,
# but not actually numeric (e.g. DatetimeIndex/PeriodIndex)
if not self._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op {opstr} "
"for type: {typ}"
.format(opstr=opstr, typ=type(self).__name__))
if isinstance(other, Index):
if not other._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op "
"{opstr} with type: {typ}"
.format(opstr=opstr, typ=type(other)))
elif isinstance(other, np.ndarray) and not other.ndim:
other = other.item()
if isinstance(other, (Index, ABCSeries, np.ndarray)):
if len(self) != len(other):
raise ValueError("cannot evaluate a numeric op with "
"unequal lengths")
other = com._values_from_object(other)
if other.dtype.kind not in ['f', 'i', 'u']:
raise TypeError("cannot evaluate a numeric op "
"with a non-numeric dtype")
elif isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
# higher up to handle
pass
elif isinstance(other, (datetime, np.datetime64)):
# higher up to handle
pass
else:
if not (is_float(other) or is_integer(other)):
raise TypeError("can only perform ops with scalar values")
return other
@classmethod
def _add_numeric_methods_binary(cls):
""" add in numeric methods """
cls.__add__ = _make_arithmetic_op(operator.add, cls)
cls.__radd__ = _make_arithmetic_op(ops.radd, cls)
cls.__sub__ = _make_arithmetic_op(operator.sub, cls)
cls.__rsub__ = _make_arithmetic_op(ops.rsub, cls)
cls.__mul__ = _make_arithmetic_op(operator.mul, cls)
cls.__rmul__ = _make_arithmetic_op(ops.rmul, cls)
cls.__rpow__ = _make_arithmetic_op(ops.rpow, cls)
cls.__pow__ = _make_arithmetic_op(operator.pow, cls)
cls.__mod__ = _make_arithmetic_op(operator.mod, cls)
cls.__floordiv__ = _make_arithmetic_op(operator.floordiv, cls)
cls.__rfloordiv__ = _make_arithmetic_op(ops.rfloordiv, cls)
cls.__truediv__ = _make_arithmetic_op(operator.truediv, cls)
cls.__rtruediv__ = _make_arithmetic_op(ops.rtruediv, cls)
if not compat.PY3:
cls.__div__ = _make_arithmetic_op(operator.div, cls)
cls.__rdiv__ = _make_arithmetic_op(ops.rdiv, cls)
cls.__divmod__ = _make_arithmetic_op(divmod, cls)
@classmethod
def _add_numeric_methods_unary(cls):
""" add in numeric unary methods """
def _make_evaluate_unary(op, opstr):
def _evaluate_numeric_unary(self):
self._validate_for_numeric_unaryop(op, opstr)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(op(self.values), **attrs)
return _evaluate_numeric_unary
cls.__neg__ = _make_evaluate_unary(operator.neg, '__neg__')
cls.__pos__ = _make_evaluate_unary(operator.pos, '__pos__')
cls.__abs__ = _make_evaluate_unary(np.abs, '__abs__')
cls.__inv__ = _make_evaluate_unary(lambda x: -x, '__inv__')
@classmethod
def _add_numeric_methods(cls):
cls._add_numeric_methods_unary()
cls._add_numeric_methods_binary()
@classmethod
def _add_logical_methods(cls):
""" add in logical methods """
_doc = """
%(desc)s
Parameters
----------
*args
These parameters will be passed to numpy.%(outname)s.
**kwargs
These parameters will be passed to numpy.%(outname)s.
Returns
-------
%(outname)s : bool or array_like (if axis is specified)
A single element array_like may be converted to bool."""
_index_shared_docs['index_all'] = dedent("""
See Also
--------
pandas.Index.any : Return whether any element in an Index is True.
pandas.Series.any : Return whether any element in a Series is True.
pandas.Series.all : Return whether all elements in a Series are True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to True because these are not equal to zero.
Examples
--------
**all**
True, because nonzero integers are considered True.
>>> pd.Index([1, 2, 3]).all()
True
False, because ``0`` is considered False.
>>> pd.Index([0, 1, 2]).all()
False
**any**
True, because ``1`` is considered True.
>>> pd.Index([0, 0, 1]).any()
True
False, because ``0`` is considered False.
>>> pd.Index([0, 0, 0]).any()
False
""")
_index_shared_docs['index_any'] = dedent("""
See Also
--------
pandas.Index.all : Return whether all elements are True.
pandas.Series.all : Return whether all elements are True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to True because these are not equal to zero.
Examples
--------
>>> index = pd.Index([0, 1, 2])
>>> index.any()
True
>>> index = pd.Index([0, 0, 0])
>>> index.any()
False
""")
def _make_logical_function(name, desc, f):
@Substitution(outname=name, desc=desc)
@Appender(_index_shared_docs['index_' + name])
@Appender(_doc)
def logical_func(self, *args, **kwargs):
result = f(self.values)
if (isinstance(result, (np.ndarray, ABCSeries, Index)) and
result.ndim == 0):
# return NumPy type
return result.dtype.type(result.item())
else: # pragma: no cover
return result
logical_func.__name__ = name
return logical_func
cls.all = _make_logical_function('all', 'Return whether all elements '
'are True.',
np.all)
cls.any = _make_logical_function('any',
'Return whether any element is True.',
np.any)
@classmethod
def _add_logical_methods_disabled(cls):
""" add in logical methods to disable """
cls.all = make_invalid_op('all')
cls.any =
|
make_invalid_op('any')
|
pandas.core.ops.make_invalid_op
|
import streamlit as st
import pybaseball as pb
import pandas as pd
import scipy.stats as stat
import random
import pickle
import numpy as np
import plotly.express as px
import os
import itertools
import plotly.graph_objects as go
# Constants
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
YEARS = [2017, 2018, 2019, 2020]
with open(f"{APP_ROOT}/resources/svm.pickle", "rb") as f:
SVM = pickle.load(f)
with open(f"{APP_ROOT}/resources/expect.pickle", "rb") as f:
BB_TYPE_COUNT = pickle.load(f)
LAUNCH_SPEED_RANGE = np.linspace(0, 150, 150)
LAUNCH_ANGLE_RANGE = np.linspace(-90, 90, 180)
LABEL_MAP = {
    "single": "Single", "double": "Double", "triple": "Triple", "home_run": "Home run", "field_out": "Out",
    "avg": "Batting average", "obp": "On-base percentage", "slg": "Slugging percentage", "ops": "OPS (OBP + SLG)"
}
# Messages
INFO = """
- An MLB batting simulator built on `Statcast` data.
- Run a simulation as follows:
    - In the sidebar, set the player name and the season (data year) to simulate.
    - Press the `シミュレーション` (Simulate) button. The data is fetched and the simulation runs.
- Every simulation uses a fixed 500 balls in play.
"""
WARN = """
### :warning: **Caution** :warning:
- No simulation is run for players with fewer than 200 plate appearances.
- Fetching the data for a simulation takes time.
    - To mitigate this, the app caches results, so repeated searches on the same data reuse the cache.
- If no data matches the conditions, an error message is shown. Adjust the conditions and search again.
"""
PLAYERID_ERROR = """
The specified player was not found.
Check that the first and last names are spelled correctly and not entered in reverse order.
"""
FROM_PLAYERID_ERROR = """
The specified comparison-source player was not found.
Check that the first and last names are spelled correctly and not entered in reverse order.
"""
TO_PLAYERID_ERROR = """
The specified comparison-target player was not found.
Check that the first and last names are spelled correctly and not entered in reverse order.
"""
STATCAST_ERROR = """
No `Statcast` data matched the given conditions.
Check that the player was active in the selected season.
"""
FROM_STATCAST_ERROR = """
No `Statcast` data matched the given conditions for the comparison source.
Check that the player was active in the selected season.
"""
TO_STATCAST_ERROR = """
No `Statcast` data matched the given conditions for the comparison target.
Check that the player was active in the selected season.
"""
@st.cache(suppress_st_warning=True)
def __search_playerid(first_name, last_name):
players = pd.read_csv(f"{APP_ROOT}/resources/players.csv")
info = players[
(players["name_first"].str.upper() == first_name.upper()) & (players["name_last"].str.upper() == last_name.upper())
].sort_values(["mlb_played_last"], ascending=False)
return info["key_mlbam"].values
@st.cache(suppress_st_warning=True)
def __get_statcast_data(start_dt, end_dt, player_id):
return pb.statcast_batter(start_dt, end_dt, player_id)
@st.cache(suppress_st_warning=True)
def __get_bb_k_rate(first_name, last_name, year):
    # Assume the player's K% and BB% stay constant
bs = __get_batting_stats(year)
bb_k_rate = bs[bs["Name"] == f"{first_name} {last_name}"]
bb_rate = bb_k_rate["BB%"].values[0]
k_rate = bb_k_rate["K%"].values[0]
return bb_rate, k_rate
@st.cache(suppress_st_warning=True)
def __get_batting_stats(year):
return pb.batting_stats(f"{year}", qual=200, stat_columns=["NAME", "BB_PCT", "K_PCT"])
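# Editor's note: __simulate (below) models exit velocity and launch angle with
# skew-normal distributions fitted to the observed batted balls. A minimal,
# self-contained sketch of that fit/sample round trip (values are made up):
# >>> import numpy as np
# >>> import scipy.stats as stat
# >>> data = stat.skewnorm.rvs(4, loc=88, scale=12, size=1000, random_state=0)
# >>> a, loc, scale = stat.skewnorm.fit(data)
# >>> samples = stat.skewnorm.rvs(a, loc, scale, size=500)
# >>> samples.shape
# (500,)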
def __simulate(df, first_name, last_name, year):
df = df[(df["launch_speed"].isnull() == False) & (df["launch_angle"].isnull() == False) & (df["launch_speed_angle"].isnull() == False)]
df = df[
df["events"].isin(["home_run", "field_out", "grounded_into_double_play", "single", "double_play", "double", "triple", "triple_play"])
]
df["events"] = df["events"].replace({
"grounded_into_double_play": "field_out",
"double_play": "field_out",
"triple_play": "field_out"
})
ls = df["launch_speed"].values
la = df["launch_angle"].values
lsa, lsloc, lsscale = stat.skewnorm.fit(ls)
laa, laloc, lascale = stat.skewnorm.fit(la)
sim = pd.DataFrame(columns=["pattern", "ls", "la"])
for i in range(0, 100):
pred_ls = stat.skewnorm.rvs(lsa, lsloc, lsscale, size=500)
pred_la = stat.skewnorm.rvs(laa, laloc, lascale, size=500)
pred_ls = random.sample(list(pred_ls), len(list(pred_ls)))
pred_la = random.sample(list(pred_la), len(list(pred_la)))
d = pd.DataFrame(columns=["pattern", "ls", "la"])
d["ls"] = pred_ls
d["la"] = pred_la
d["pattern"] = i
sim =
|
pd.concat([sim, d])
|
pandas.concat
|
import os
from io import BytesIO
from pathlib import Path
from datetime import datetime
from functools import partial
from math import ceil
import numpy
import pandas
from reportlab.lib.pagesizes import letter, landscape
from reportlab.platypus import (
SimpleDocTemplate,
Paragraph,
Image,
Table,
PageBreak,
Spacer,
)
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.enums import TA_CENTER, TA_LEFT, TA_RIGHT
from reportlab.lib.units import inch
from reportlab.lib import colors
from reportlab.pdfgen import canvas
from matplotlib import pyplot, ticker, figure
import seaborn
from pybmpdb import summary
from wqio.utils import sigFigs
from wqio import validate, viz
TODAY = datetime.today().strftime("%Y-%m-%d")
STYLES = getSampleStyleSheet()
BASEURL = "https://dot-portal-app.azurewebsites.net/api"
_FOOTERSTYLE = STYLES["Normal"].clone("footer")
_FOOTERSTYLE.fontName = "Helvetica"
_FOOTERSTYLE.fontSize = 8
_FOOTERSTYLE.alignment = TA_CENTER
_HEADERSTYLE = STYLES["Heading1"].clone("header")
_HEADERSTYLE.fontName = "Helvetica-Bold"
_HEADERSTYLE.fontSize = 16
_HEADERSTYLE.alignment = TA_RIGHT
_pal = seaborn.color_palette("deep")
BLUE = _pal[0]
GREEN = _pal[2]
def _get_units(df, col):
if not df.empty:
all_units = df[col].unique().tolist()
if not len(all_units) == 1:
raise ValueError(f"lots of {col} ({all_units})")
else:
return all_units[0]
def precip_flow_plot(precip, volume, punit, vunit) -> figure.Figure:
# fig = pyplot.figure(figsize=(7, 5), dpi=300)
# pax = fig.add_axes([0.05, 0.65, 0.90, 0.30])
# vax = fig.add_axes([0.05, 0.05, 0.90, 0.60], sharex=pax)
if not punit:
punit = "No Units"
if not vunit:
vunit = "No Units"
fig, (pax, vax) = pyplot.subplots(
nrows=2,
ncols=1,
figsize=(7.5, 4.5),
dpi=300,
sharex=True,
gridspec_kw=dict(height_ratios=[1, 2.5], hspace=0.00),
)
pax.yaxis.set_label_position("right")
pax.yaxis.tick_right()
pax.invert_yaxis()
pax.xaxis.tick_top()
pax.set_ylabel(f"Precip. ({punit})", rotation=270, va="top", labelpad=10)
vax.set_ylabel(f"Flow Volume ({vunit})")
if not precip.empty:
pax.bar("date", "PrecipDepth_Value", color="0.425", data=precip)
else:
pax.annotate(
"No Data to show",
(0.5, 0.5),
(0, 0),
xycoords="axes fraction",
textcoords="offset points",
ha="center",
va="center",
)
pax.yaxis.set_major_formatter(ticker.NullFormatter())
pax.set_ylim(top=0)
if not volume.empty:
vax.plot(
"date",
"Volume_Total",
marker="d",
linestyle="none",
label="Inflow",
color=BLUE,
data=volume.loc[volume["MSType"] == "Inflow"],
)
vax.plot(
"date",
"Volume_Total",
marker="s",
linestyle="none",
label="Outflow",
color=GREEN,
data=volume.loc[volume["MSType"] == "Outflow"],
)
vax.yaxis.set_major_formatter(ticker.FuncFormatter(lambda x, pos: f"{int(x):,d}"))
vax.legend(loc="best")
vax.set_ylim(bottom=0)
else:
vax.annotate(
"No Data to show",
(0.5, 0.5),
(0, 0),
xycoords="axes fraction",
textcoords="offset points",
ha="center",
va="center",
)
vax.yaxis.set_major_formatter(ticker.NullFormatter())
if precip.empty and volume.empty:
pax.xaxis.set_major_formatter(ticker.NullFormatter())
seaborn.despine(ax=pax, left=False, right=False, top=False, bottom=True)
seaborn.despine(ax=vax, left=False, right=False, top=True, bottom=False)
viz.rotateTickLabels(pax, -25, "x")
viz.rotateTickLabels(vax, 25, "x")
fig.tight_layout()
return fig
def _table_float(x):
if pandas.isnull(x):
return "N/A"
return sigFigs(x, 3, tex=False, pval=False, forceint=False)
def _table_int(x):
if pandas.isnull(x):
return "N/A"
return "{:,d}".format(int(x))
def _table_string(x):
if pandas.isnull(x):
return "N/A"
return str(x)
def _table_date(d):
if pandas.isnull(d):
return "N/A"
else:
return pandas.to_datetime(d).strftime("%Y-%m-%d")
def _table_cost(x):
if pandas.isnull(x):
return "N/A"
else:
return "${:,d}".format(int(x))
def _table_paragraph(text):
styleN = STYLES["BodyText"]
styleN.alignment = TA_LEFT
styleN.leading = 9
return Paragraph(f"{text}", styleN)
def _design_param_fmt(x):
if pandas.isnull(x):
return "N/A"
elif numpy.isreal(x):
if int(x) == x:
return _table_int(x)
else:
return _table_float(x)
else:
return _table_string(x)
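# Illustrative behaviour of the formatters above (editor's sketch; inputs are
# made up and _table_float defers to wqio.utils.sigFigs):
# >>> _table_int(1234.0)
# '1,234'
# >>> _table_cost(2500)
# '$2,500'
# >>> _table_date('2020-06-01')
# '2020-06-01'
# >>> _design_param_fmt(12.0)
# '12'
# >>> _table_string('sand filter')
# 'sand filter'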
def parse_dates(df):
if not df.empty:
return df.assign(date=lambda df: pandas.to_datetime(df["DateStart"]))
return df
class NumberedCanvasLandscape(canvas.Canvas):
def __init__(self, *args, **kwargs):
canvas.Canvas.__init__(self, *args, **kwargs)
self._saved_page_states = []
def showPage(self):
self._saved_page_states.append(dict(self.__dict__))
self._startPage()
def save(self):
"""add page info to each page (page x of y)"""
num_pages = len(self._saved_page_states)
for state in self._saved_page_states:
self.__dict__.update(state)
self.setFont(_FOOTERSTYLE.fontName, _FOOTERSTYLE.fontSize)
self.draw_page_number(num_pages)
canvas.Canvas.showPage(self)
canvas.Canvas.save(self)
def draw_page_number(self, page_count):
# Change the position of this to wherever you want the page number to be
self.drawRightString(10.5 * inch, 0.35 * inch, f"Page {self._pageNumber} of {page_count}")
# self.drawCentredString(6.5 * inch, 0.5 * inch, "Test centred")
self.drawString(0.5 * inch, 0.35 * inch, f"Generated: {TODAY}")
class NumberedCanvasPortrait(NumberedCanvasLandscape):
def draw_page_number(self, page_count):
# Change the position of this to wherever you want the page number to be
self.drawRightString(8 * inch, 0.35 * inch, f"Page {self._pageNumber} of {page_count}")
# self.drawCentredString(6.5 * inch, 0.5 * inch, "Test centred")
self.drawString(0.5 * inch, 0.35 * inch, f"Generated: {TODAY}")
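# Editor's note: the numbered canvases above implement the usual two-pass
# "Page X of Y" reportlab pattern; they are handed to the document through the
# ``canvasmaker`` argument of ``SimpleDocTemplate.build`` (see
# _PDFReportMixin.build further below). A minimal hedged sketch:
# >>> from io import BytesIO
# >>> doc = SimpleDocTemplate(BytesIO(), pagesize=landscape(letter))
# >>> doc.build([Paragraph("hello", STYLES["Normal"])],
# ...           canvasmaker=NumberedCanvasLandscape)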
def get_api_data(endpoint):
return pandas.read_json(BASEURL + endpoint, dtype={"PDFID": str}).sort_values(by=["PDFID"])
def get_sites_info():
return pandas.read_json(BASEURL + "/DOTSites", dtype={"PDFID": str}).sort_values(by=["PDFID"])
def get_climate_info():
return pandas.read_json(BASEURL + "/vClimateRecords", dtype={"PDFID": str}).sort_values(by=["PDFID"])
def get_hydro_info(pdfid, all_climate, all_precip, all_flow):
# dtype = {"PDFID": str}
# flow = pandas.read_json(BASEURL + f"/vFlowRecords?pdf_id={pdfid}", dtype=dtype).pipe(parse_dates)
# precip = pandas.read_json(BASEURL + f"/vPrecipRecords?pdf_id={pdfid}", dtype=dtype).pipe(parse_dates)
selector = lambda df: df["PDFID"] == pdfid
c = all_climate.loc[selector]
p = all_precip.loc[selector]
f = all_flow.loc[selector]
assert c.shape[0] == 1
return c.iloc[0], p, f
def get_bmp_info(pdfid, sites):
dtype = {"PDFID": str}
# bmp design meta data
meta = (
pandas.read_json(
BASEURL + f"/vBMPDesignMetas?pdf_id={pdfid}",
dtype=dtype,
)
.merge(sites, on="PDFID", suffixes=("", "_ds"), how="left")
.loc[0, lambda df: df.columns.map(lambda c: not c.endswith("_ds"))]
)
# bmp design elements
elements = pandas.read_json(BASEURL + f"/vBMPDesignElements?pdf_id={pdfid}", dtype=dtype)
if elements.shape[0] == 0:
elements = None
title = meta["BMPName"]
return meta, elements, title
def _make_table_from_df(df, headers, style, datecols=None, dateformat=None, banded=True, col_widths=None, title=None):
"""Helper function to make a reportlab table from a dataframe
Parameters
----------
df : pandas.DataFrame
Dataframe containing the data to be tabulated
headers : list of str
Column labels for the rendered table
style : list of tuples
List of reportlab-compatible table style tuples
banded : bool (default = True)
When true, every other row in the table will have a light grey
background.
title : str, optional
If provided, inserts a single-valued row above the column headers
"""
if datecols:
if not dateformat:
dateformat = "%Y-%m-%d"
df = df.assign(**{dc: df[dc].dt.strftime(dateformat) for dc in datecols})
if banded:
bands = [("BACKGROUND", (0, row), (-1, row), colors.lightgrey) for row in range(1, df.shape[0] + 1, 2)]
style = [*style, *bands]
_data = df.astype(str).applymap(_table_paragraph).values.tolist()
_headers = [_table_paragraph(h) for h in headers]
table_values = [_headers, *_data]
if title:
_blanks = ["" for _ in range(len(headers) - 1)]
_title = [title, *_blanks]
table_values = [_title, *table_values]
table = Table(table_values, repeatRows=1, repeatCols=1, style=style, colWidths=col_widths)
return table
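# Hedged usage sketch of the helper above (editor's addition; the data and
# style are made up):
# >>> demo = pandas.DataFrame({"Site": ["A", "B"], "Events": [3, 5]})
# >>> demo_style = [("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold")]
# >>> tbl = _make_table_from_df(demo, ["Site", "Events"], demo_style)
# >>> w, h = tbl.wrap(400, 600)   # reportlab needs wrap() before drawing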
def two_tables_next_to_eachother(leftdata, rightdata, leftheader, rightheader, col_widths, styled=True):
# headers = ("Watershed Characteristics", "", "Transportation Characteristics", "")
# col_widths = [table_width / len(headers)] * len(headers)
data = pandas.concat([leftdata, rightdata], axis="columns").fillna("")
headers = (leftheader, "", rightheader, "")
if styled:
style = [
("SPAN", (0, 0), (1, 0)), # header row, merge columns 1 and 2
("SPAN", (2, 0), (3, 0)), # header row, merge columns 3 and 4
("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold"), # header row is bold
("FONTNAME", (0, 0), (0, -1), "Helvetica-Bold"), # watershed header col is bold
("FONTNAME", (2, 0), (2, -1), "Helvetica-Bold"), # DOT header col is bold
("FONTNAME", (1, 1), (1, -1), "Helvetica"), # watershed data cells are not bold
("FONTNAME", (3, 1), (3, -1), "Helvetica"), # DOT data cells are not bold
("ALIGN", (0, 0), (-1, 0), "CENTER"), # header row is centered
("ALIGN", (0, 1), (0, -1), "LEFT"), # header col is horizontally left-aligned
("ALIGN", (1, 1), (-1, -1), "LEFT"), # all other cells are horizontally centered
("VALIGN", (0, 0), (-1, -1), "TOP"), # all cells are vertically centered
("LINEBELOW", (0, 0), (-1, 0), 1, colors.black), # line below column headers
("LINEAFTER", (1, 0), (1, -1), 1, colors.black), # line btwn the two subtables
]
else:
style = [
("FONTNAME", (0, 0), (0, -1), "Helvetica-Bold"), # loc header col is bold
("FONTNAME", (2, 0), (2, -1), "Helvetica-Bold"), # bmp header col is bold
("FONTNAME", (1, 1), (1, -1), "Helvetica"), # watershed data cells are not bold
("FONTNAME", (3, 1), (3, -1), "Helvetica"), # DOT data cells are not bold
("ALIGN", (0, 0), (-2, -1), "LEFT"), # first three cols are left-aligned
("ALIGN", (-1, 0), (-1, -1), "RIGHT"), # last col is right-aligned
("VALIGN", (0, 0), (-1, -1), "TOP"), # all cells are vertically centered
]
table = _make_table_from_df(data, headers, style, banded=False, col_widths=col_widths)
return table
def normal_table(data, title, col_widths):
style = [
("SPAN", (0, 0), (-1, 0)), # header row, merge all columns 1 and 2
("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold"), # title row is bold
("FONTNAME", (0, 1), (-1, 1), "Helvetica-Bold"),
# ("BACKGROUND", (0, 1), (-1, 1), colors.lightblue),
# ("FONTNAME", (0, 1), (-1, 1), "Helvetica-Bold"), # header row is bold
# ("FONTNAME", (0, 2), (0, -1), "Helvetica-Bold"), # header col is bold
# ("FONTNAME", (1, 2), (1, -1), "Helvetica"), # watershed data cells are not bold
("ALIGN", (0, 0), (-1, 0), "CENTER"), # header row is centered
("ALIGN", (0, 1), (0, -1), "LEFT"), # header col is horizontally left-aligned
("ALIGN", (1, 1), (-1, -1), "LEFT"), # all other cells are left-aligned
("VALIGN", (0, 0), (-1, -1), "TOP"), # all cells are vertically centered
# ("LINEBELOW", (0, 0), (-1, 0), 1, colors.black), # line below column title
("LINEBELOW", (0, 1), (-1, 1), 1, colors.black), # line below column headers
]
table = _make_table_from_df(data, data.columns.tolist(), style, banded=False, col_widths=col_widths, title=title)
return table
def single_table(data, header, col_widths):
# headers = (f"BMP {which} Informatiom", "")
headers = (header, "")
style = [
("SPAN", (0, 0), (1, 0)), # header row, merge columns 1 and 2
("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold"), # header row is bold
("FONTNAME", (0, 0), (0, -1), "Helvetica-Bold"), # header col is bold
("FONTNAME", (1, 1), (1, -1), "Helvetica"), # watershed data cells are not bold
("ALIGN", (0, 0), (-1, 0), "CENTER"), # header row is centered
("ALIGN", (0, 1), (0, -1), "LEFT"), # header col is horizontally left-aligned
("ALIGN", (1, 1), (-1, -1), "LEFT"), # all other cells are left-aligned
("VALIGN", (0, 0), (-1, -1), "TOP"), # all cells are vertically centered
("LINEBELOW", (0, 0), (-1, 0), 1, colors.black), # line below column headers
]
table = _make_table_from_df(data, headers, style, banded=False, col_widths=col_widths)
return table
def no_info_table(header, col_width, msg):
data = [(header,), (msg,)]
style = [
("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold"), # header row is bold
("LINEBELOW", (0, 0), (-1, 0), 1, colors.black), # line below column headers
("ALIGN", (0, 0), (-1, -1), "CENTER"), # center everything
("VALIGN", (0, 0), (-1, -1), "TOP"), # all cells are vertically centered
]
col_widths = validate.at_least_empty_list(col_width)
table = Table(data, repeatRows=1, repeatCols=1, style=style, colWidths=col_widths)
return table
def make_design_table(design_elements, table_width):
header = "BMP Design Information"
if design_elements is None:
table = no_info_table(header, 0.25 * table_width, "No Design Information Available")
else:
nrows = design_elements.shape[0]
if nrows < 10:
col_widths = [table_width * 0.33] * 2
table = single_table(design_elements, header, col_widths)
else:
half_rows = ceil(design_elements.shape[0] / 2)
col_widths = [table_width * 0.25] * 4
table = two_tables_next_to_eachother(
design_elements.iloc[:half_rows].reset_index(drop=True),
design_elements.iloc[half_rows:].reset_index(drop=True),
header,
header,
col_widths,
styled=True,
)
return table
def _header_footer(canvas, doc, filename, title):
# Save the state of our canvas so we can draw on it
canvas.saveState()
# Header
title = Paragraph(title, _HEADERSTYLE)
w, h = title.wrap((doc.width - 3.5 * inch), 1 * inch)
title.drawOn(canvas, 3.5 * inch, doc.height + doc.topMargin - 0.825 * inch)
logo = Image("logo-withtext.png")
logo.drawHeight *= 0.35
logo.drawWidth *= 0.35
logo.drawOn(canvas, doc.leftMargin, doc.height + doc.topMargin - 1 * inch)
# Footer
footer = Paragraph(filename, _FOOTERSTYLE)
w, h = footer.wrap(doc.width, doc.bottomMargin)
footer.drawOn(canvas, doc.leftMargin, 0.35 * inch)
# Release the canvas
canvas.restoreState()
class _PDFReportMixin:
margin = 0.5 * inch
@property
def table_width(self):
return self.width - 2 * self.margin
def render(self):
doc = SimpleDocTemplate(
self.buffer,
rightMargin=self.margin,
leftMargin=self.margin,
topMargin=self.margin * 3,
bottomMargin=self.margin,
pagesize=self.pagesize,
)
doc_elements = self.arrange_elements()
self.build(doc, doc_elements)
def build(self, doc, doc_elements):
if self.pagesize == landscape(letter):
canvasmaker = NumberedCanvasLandscape
elif self.pagesize == letter:
canvasmaker = NumberedCanvasPortrait
else:
            raise NotImplementedError(f"Only letter paper is available, not {self.pagesize}")
doc.build(
doc_elements,
onFirstPage=partial(_header_footer, filename=self.filename, title=self.title),
onLaterPages=partial(_header_footer, filename=self.filename, title=self.title),
canvasmaker=canvasmaker,
)
def save(self, *folders):
for d in folders:
self.buffer.seek(0)
with Path(d, self.filename).open("wb") as out:
out.write(self.buffer.read())
class BMPDescriptionReport(_PDFReportMixin):
def __init__(self, buffer, filename, meta, design_elements, title):
self.buffer = buffer
self.pagesize = landscape(letter)
self.width, self.height = self.pagesize
self.meta = meta
self.design_elements = design_elements
self.title = title
self.filename = filename
self._loc_bmp_table = None
self._wshed_dot_table = None
self._cost_table = None
self._design_table = None
@property
def loc_bmp_table(self):
if self._loc_bmp_table is None:
self._loc_bmp_table = two_tables_next_to_eachother(
self.location_values(),
self.bmp_values(),
"",
"",
col_widths=[self.table_width * x for x in (0.10, 0.60, 0.15, 0.15)],
styled=False,
)
self._loc_bmp_table.wrap(*self.pagesize)
return self._loc_bmp_table
@property
def wshed_dot_table(self):
if self._wshed_dot_table is None:
self._wshed_dot_table = two_tables_next_to_eachother(
self.watershed_values(),
self.dot_values(),
"Watershed Characteristics",
"Transportation Characteristics",
col_widths=[self.table_width / 4] * 4,
styled=True,
)
self._wshed_dot_table.wrap(*self.pagesize)
return self._wshed_dot_table
@property
def design_table(self):
if self._design_table is None:
self._design_table = make_design_table(self.design_values(), self.table_width)
self._design_table.wrap(*self.pagesize)
return self._design_table
@property
def cost_table(self):
if self._cost_table is None:
self._cost_table = single_table(
                self.cost_values(), "BMP Cost Information", col_widths=[0.2 * self.table_width] * 2
)
self._cost_table.wrap(*self.pagesize)
return self._cost_table
def watershed_values(self):
watershed_names = {
"EPARainZone": ("EPA Rain Zone", _table_string),
"WSName": ("Watershed Name", _table_string),
"Type": ("Watershed Type", _table_string),
"Area": ("Total Watershed Area", _table_float),
"Area_unit": ("Area Unit", _table_string),
"AreaImpervious_pct": ("Percent Impervious", _table_float),
"NRCSSoilGroup": ("Soil Group", _table_string),
"Area_Descr": ("Watershed Description", _table_string),
"LandUse_Descr": ("Land Use Description", _table_string),
"Vegetation_Descr": ("Vegetation Description", _table_string),
}
data = {value[0]: value[1](self.meta.get(key)) for key, value in watershed_names.items()}
return
|
pandas.Series(data)
|
pandas.Series
|
import pandas
from .utils import format_file_size, timestamp_to_datetime
class StepFile(object):
"""A class that wraps the :obj:`dict` with step file data returned by the API
Args:
process (dict): Parent process data
execution (dict): Process execution instance
step_file (dict): Step file data
Returns:
A :py:class:`StepFile <slipoframes.model.StepFile>` object.
"""
def __init__(self, process: dict, execution: dict, step_file: dict):
self.__process = process
self.__execution = execution
self.__file = step_file
@property
def id(self):
"""Get step file unique id"""
return self.__file['id']
@property
def process_id(self):
return self.__process['id']
@property
def process_version(self):
return self.__process['version']
@property
def name(self):
return self.__file['name']
@property
def output_type(self):
return self.__file['type']
@property
def output_part_key(self):
"""Get step file output part key"""
return self.__file['outputPartKey']
@property
def size(self):
return self.__file['size']
def __str__(self):
return 'File ({id}, {name})'.format(id=self.id, name=self.name)
def __repr__(self):
return 'File ({id}, {name})'.format(id=self.id, name=self.name)
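# Hedged usage sketch (editor's addition; the dict fields are inferred from the
# properties above, not taken from the SLIPO API documentation):
# >>> f = StepFile({'id': 7, 'version': 2}, {}, {'id': 42, 'name': 'out.nt',
# ...              'type': 'OUTPUT', 'outputPartKey': None, 'size': 1024})
# >>> f.name, f.size
# ('out.nt', 1024)
# >>> str(f)
# 'File (42, out.nt)'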
class Process(object):
"""A class that wraps the :obj:`dict` with process data returned by the API
Args:
record (dict): Process execution data
Returns:
A :py:class:`Process <slipoframes.model.Process>` object.
"""
def __init__(self, record: dict):
self.__process = record['process']
self.__execution = record['execution']
@property
def process(self):
"""Get process dict"""
return self.__process
@property
def execution(self):
"""Get execution dict"""
return self.__execution
@property
def id(self):
"""Get process unique id"""
return self.__process['id']
@property
def version(self):
"""Get process version"""
return self.__process['version']
@property
def status(self):
"""Get process status"""
return self.__execution['status']
@property
def name(self):
return self.__process['name']
@property
def submitted_on(self):
return timestamp_to_datetime(self.__execution['submittedOn'])
@property
def started_on(self):
return timestamp_to_datetime(self.__execution['startedOn'])
@property
def completedOn(self):
return self.__execution['completedOn']
def steps(self):
# Extract files from execution
data = self._collect_process_execution_steps(self.__execution)
df = pandas.DataFrame(data=data)
# Sort by name
df = df.sort_values(by=['Name'], axis=0)
# Reorder columns
df = df[['Name', 'Tool', 'Operation',
'Status', 'Started On', 'Completed On']]
return df
    def _collect_process_execution_steps(self, exec: dict) -> list:
result = []
        if not isinstance(exec, dict) or 'steps' not in exec:
return result
for s in exec['steps']:
result.append({
'Name': s['name'],
'Tool': s['tool'],
'Operation': s['operation'],
'Status': s['status'],
'Started On': timestamp_to_datetime(s['startedOn']) or '',
'Completed On': timestamp_to_datetime(s['completedOn']) or '',
})
return result
def files(self, format_size: bool = False):
"""Get all operation files
Args:
format_size (bool, optional): If `True`, the file size is converted
to a user friendly string (default `False`).
Returns:
A :obj:`pandas.DataFrame` with all files
"""
# Extract files from execution
data = self._collect_process_execution_files(self.__execution)
df =
|
pandas.DataFrame(data=data)
|
pandas.DataFrame
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
from sklearn.metrics import mean_squared_error
from math import sqrt
import os
from datetime import datetime, timedelta
matplotlib.rcParams.update({'font.size': 8})
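# Editor's note: series_to_supervised (below) frames a time series for
# supervised learning by concatenating shifted copies of the data. A minimal
# illustration of the underlying shift-and-concat idea:
# >>> demo = pd.DataFrame({'x': [1, 2, 3, 4]})
# >>> pd.concat([demo.shift(1), demo], axis=1).values.tolist()
# [[nan, 1.0], [1.0, 2.0], [2.0, 3.0], [3.0, 4.0]]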
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg =
|
pd.concat(cols, axis=1)
|
pandas.concat
|
"""
Code from Modeling and Simulation in Python.
Copyright 2020 <NAME>
MIT License: https://opensource.org/licenses/MIT
"""
import logging
logger = logging.getLogger(name="modsim.py")
# make sure we have Python 3.6 or better
import sys
if sys.version_info < (3, 6):
logger.warning("modsim.py depends on Python 3.6 features.")
import inspect
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import scipy.optimize as spo
from scipy.interpolate import interp1d
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.integrate import solve_ivp
from types import SimpleNamespace
from copy import copy
def flip(p=0.5):
"""Flips a coin with the given probability.
p: float 0-1
returns: boolean (True or False)
"""
return np.random.random() < p
def cart2pol(x, y, z=None):
"""Convert Cartesian coordinates to polar.
x: number or sequence
y: number or sequence
z: number or sequence (optional)
returns: theta, rho OR theta, rho, z
"""
x = np.asarray(x)
y = np.asarray(y)
rho = np.hypot(x, y)
theta = np.arctan2(y, x)
if z is None:
return theta, rho
else:
return theta, rho, z
def pol2cart(theta, rho, z=None):
"""Convert polar coordinates to Cartesian.
theta: number or sequence in radians
rho: number or sequence
z: number or sequence (optional)
returns: x, y OR x, y, z
"""
x = rho * np.cos(theta)
y = rho * np.sin(theta)
if z is None:
return x, y
else:
return x, y, z
from numpy import linspace
def linrange(start, stop=None, step=1, **options):
"""Make an array of equally spaced values.
start: first value
stop: last value (might be approximate)
step: difference between elements (should be consistent)
returns: NumPy array
"""
if stop is None:
stop = start
start = 0
n = int(round((stop-start) / step))
return linspace(start, stop, n+1, **options)
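# Illustrative example (editor's addition): the endpoint is included, unlike
# np.arange.
# >>> linrange(0, 10, 2).tolist()
# [0.0, 2.0, 4.0, 6.0, 8.0, 10.0]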
def root_scalar(func, *args, **kwargs):
"""Finds the input value that minimizes `min_func`.
Wrapper for
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root_scalar.html
func: computes the function to be minimized
bracket: sequence of two values, lower and upper bounds of the range to be searched
args: any additional positional arguments are passed to func
kwargs: any keyword arguments are passed to root_scalar
returns: RootResults object
"""
bracket = kwargs.get('bracket', None)
if bracket is None or len(bracket) != 2:
msg = ("To run root_scalar, you have to provide a "
"`bracket` keyword argument with a sequence "
"of length 2.")
raise ValueError(msg)
try:
func(bracket[0], *args)
except Exception as e:
msg = ("Before running scipy.integrate.root_scalar "
"I tried running the function you provided "
"with `bracket[0]`, "
"and I got the following error:")
logger.error(msg)
raise (e)
underride(kwargs, rtol=1e-4)
res = spo.root_scalar(func, *args, **kwargs)
if not res.converged:
msg = ("scipy.optimize.root_scalar did not converge. "
"The message it returned is:\n" + res.flag)
raise ValueError(msg)
return res
def minimize_scalar(func, *args, **kwargs):
"""Finds the input value that minimizes `func`.
Wrapper for
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize_scalar.html
func: computes the function to be minimized
args: any additional positional arguments are passed to func
kwargs: any keyword arguments are passed to minimize_scalar
returns: OptimizeResult object
"""
bounds = kwargs.get('bounds', None)
if bounds is None or len(bounds) != 2:
msg = ("To run maximize_scalar or minimize_scalar, "
"you have to provide a `bounds` "
"keyword argument with a sequence "
"of length 2.")
raise ValueError(msg)
try:
func(bounds[0], *args)
except Exception as e:
msg = ("Before running scipy.integrate.minimize_scalar, "
"I tried running the function you provided "
"with the lower bound, "
"and I got the following error:")
logger.error(msg)
raise (e)
underride(kwargs, method='bounded')
res = spo.minimize_scalar(func, args=args, **kwargs)
if not res.success:
msg = ("minimize_scalar did not succeed."
"The message it returned is: \n" +
res.message)
raise Exception(msg)
return res
def maximize_scalar(max_func, *args, **kwargs):
"""Finds the input value that maximizes `max_func`.
Wrapper for https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize_scalar.html
    max_func: computes the function to be maximized
args: any additional positional arguments are passed to max_func
options: any keyword arguments are passed as options to minimize_scalar
    returns: OptimizeResult object
"""
def min_func(*args):
return -max_func(*args)
res = minimize_scalar(min_func, *args, **kwargs)
# we have to negate the function value before returning res
res.fun = -res.fun
return res
def run_solve_ivp(system, slope_func, **options):
"""Computes a numerical solution to a differential equation.
`system` must contain `init` with initial conditions,
`t_end` with the end time. Optionally, it can contain
`t_0` with the start time.
It should contain any other parameters required by the
slope function.
`options` can be any legal options of `scipy.integrate.solve_ivp`
system: System object
slope_func: function that computes slopes
returns: TimeFrame
"""
system = remove_units(system)
# make sure `system` contains `init`
if not hasattr(system, "init"):
msg = """It looks like `system` does not contain `init`
as a system variable. `init` should be a State
object that specifies the initial condition:"""
raise ValueError(msg)
# make sure `system` contains `t_end`
if not hasattr(system, "t_end"):
msg = """It looks like `system` does not contain `t_end`
as a system variable. `t_end` should be the
final time:"""
raise ValueError(msg)
# the default value for t_0 is 0
t_0 = getattr(system, "t_0", 0)
# try running the slope function with the initial conditions
try:
slope_func(t_0, system.init, system)
except Exception as e:
msg = """Before running scipy.integrate.solve_ivp, I tried
running the slope function you provided with the
initial conditions in `system` and `t=t_0` and I got
the following error:"""
logger.error(msg)
raise (e)
# get the list of event functions
events = options.get('events', [])
# if there's only one event function, put it in a list
try:
iter(events)
except TypeError:
events = [events]
for event_func in events:
# make events terminal unless otherwise specified
if not hasattr(event_func, 'terminal'):
event_func.terminal = True
# test the event function with the initial conditions
try:
event_func(t_0, system.init, system)
except Exception as e:
msg = """Before running scipy.integrate.solve_ivp, I tried
running the event function you provided with the
initial conditions in `system` and `t=t_0` and I got
the following error:"""
logger.error(msg)
raise (e)
# get dense output unless otherwise specified
    if 't_eval' not in options:
underride(options, dense_output=True)
# run the solver
bunch = solve_ivp(slope_func, [t_0, system.t_end], system.init,
args=[system], **options)
# separate the results from the details
y = bunch.pop("y")
t = bunch.pop("t")
# get the column names from `init`, if possible
if hasattr(system.init, 'index'):
columns = system.init.index
else:
columns = range(len(system.init))
# evaluate the results at equally-spaced points
if options.get('dense_output', False):
try:
num = system.num
except AttributeError:
num = 101
t_final = t[-1]
t_array = linspace(t_0, t_final, num)
y_array = bunch.sol(t_array)
# pack the results into a TimeFrame
results = TimeFrame(y_array.T, index=t_array,
columns=columns)
else:
results = TimeFrame(y.T, index=t,
columns=columns)
return results, bunch
def leastsq(error_func, x0, *args, **options):
"""Find the parameters that yield the best fit for the data.
`x0` can be a sequence, array, Series, or Params
Positional arguments are passed along to `error_func`.
Keyword arguments are passed to `scipy.optimize.leastsq`
error_func: function that computes a sequence of errors
x0: initial guess for the best parameters
args: passed to error_func
options: passed to leastsq
    returns: best parameters (a Params object if `x0` was one) and a SimpleNamespace with details
"""
# override `full_output` so we get a message if something goes wrong
options["full_output"] = True
# run leastsq
t = scipy.optimize.leastsq(error_func, x0=x0, args=args, **options)
best_params, cov_x, infodict, mesg, ier = t
    # pack the solver details into a SimpleNamespace object
details = SimpleNamespace(cov_x=cov_x,
mesg=mesg,
ier=ier,
**infodict)
details.success = details.ier in [1,2,3,4]
# if we got a Params object, we should return a Params object
if isinstance(x0, Params):
best_params = Params(pd.Series(best_params, x0.index))
# return the best parameters and details
return best_params, details
def crossings(series, value):
"""Find the labels where the series passes through value.
The labels in series must be increasing numerical values.
series: Series
value: number
returns: sequence of labels
"""
values = series.values - value
interp = InterpolatedUnivariateSpline(series.index, values)
return interp.roots()
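# Illustrative example (editor's addition): with linearly increasing values,
# the fitted spline crosses 1.5 exactly halfway between the labels 1 and 2.
# >>> s = pd.Series([0, 1, 2, 3], index=[0, 1, 2, 3])
# >>> crossings(s, 1.5)
# array([1.5])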
def has_nan(a):
"""Checks whether the an array contains any NaNs.
:param a: NumPy array or Pandas Series
:return: boolean
"""
return np.any(np.isnan(a))
def is_strictly_increasing(a):
"""Checks whether the elements of an array are strictly increasing.
:param a: NumPy array or Pandas Series
:return: boolean
"""
return np.all(np.diff(a) > 0)
def interpolate(series, **options):
"""Creates an interpolation function.
series: Series object
options: any legal options to scipy.interpolate.interp1d
returns: function that maps from the index to the values
"""
if has_nan(series.index):
msg = """The Series you passed to interpolate contains
NaN values in the index, which would result in
undefined behavior. So I'm putting a stop to that."""
raise ValueError(msg)
if not is_strictly_increasing(series.index):
msg = """The Series you passed to interpolate has an index
that is not strictly increasing, which would result in
undefined behavior. So I'm putting a stop to that."""
raise ValueError(msg)
# make the interpolate function extrapolate past the ends of
# the range, unless `options` already specifies a value for `fill_value`
underride(options, fill_value="extrapolate")
# call interp1d, which returns a new function object
x = series.index
y = series.values
interp_func = interp1d(x, y, **options)
return interp_func
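# Usage sketch (illustrative, not part of the library): interpolate between
# the points of a Series and extrapolate past its ends, assuming pandas and
# scipy's interp1d are imported at the top of this module.
_demo_squares = pd.Series([0.0, 1.0, 4.0, 9.0], index=[0.0, 1.0, 2.0, 3.0])
_demo_f = interpolate(_demo_squares)
# _demo_f(1.5) returns 2.5 (linear between the points at 1 and 2);
# _demo_f(4.0) returns 14.0 by extrapolating the last segment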
def interpolate_inverse(series, **options):
"""Interpolate the inverse function of a Series.
series: Series object, represents a mapping from `a` to `b`
options: any legal options to scipy.interpolate.interp1d
returns: interpolation object, can be used as a function
from `b` to `a`
"""
inverse = pd.Series(series.index, index=series.values)
interp_func = interpolate(inverse, **options)
return interp_func
def gradient(series, **options):
"""Computes the numerical derivative of a series.
If the elements of series have units, they are dropped.
series: Series object
options: any legal options to np.gradient
returns: Series, same subclass as series
"""
x = series.index
y = series.values
a = np.gradient(y, x, **options)
return series.__class__(a, series.index)
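# Usage sketch (illustrative, not part of the library): differentiate a
# position series to get velocity; the result keeps the index and the
# Series subclass of its input.
_demo_pos = pd.Series([0.0, 1.0, 4.0, 9.0], index=[0.0, 1.0, 2.0, 3.0])
_demo_vel = gradient(_demo_pos)   # central differences inside, one-sided at the ends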
def source_code(obj):
"""Prints the source code for a given object.
obj: function or method object
"""
print(inspect.getsource(obj))
def underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
If d is None, create a new dictionary.
d: dictionary
options: keyword args to add to d
"""
if d is None:
d = {}
for key, val in options.items():
d.setdefault(key, val)
return d
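# Usage sketch (illustrative, not part of the library): caller-supplied
# options win; underride only fills in the missing defaults.
_demo_opts = dict(color='red')
_demo_opts = underride(_demo_opts, color='blue', linewidth=2)
# _demo_opts == {'color': 'red', 'linewidth': 2}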
def contour(df, **options):
"""Makes a contour plot from a DataFrame.
Wrapper for plt.contour
https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.contour.html
Note: columns and index must be numerical
df: DataFrame
options: passed to plt.contour
"""
fontsize = options.pop("fontsize", 12)
underride(options, cmap="viridis")
x = df.columns
y = df.index
X, Y = np.meshgrid(x, y)
cs = plt.contour(X, Y, df, **options)
plt.clabel(cs, inline=1, fontsize=fontsize)
def savefig(filename, **options):
"""Save the current figure.
Keyword arguments are passed along to plt.savefig
https://matplotlib.org/api/_as_gen/matplotlib.pyplot.savefig.html
filename: string
"""
print("Saving figure to file", filename)
plt.savefig(filename, **options)
def decorate(**options):
"""Decorate the current axes.
Call decorate with keyword arguments like
decorate(title='Title',
xlabel='x',
ylabel='y')
The keyword arguments can be any of the axis properties
https://matplotlib.org/api/axes_api.html
"""
ax = plt.gca()
ax.set(**options)
handles, labels = ax.get_legend_handles_labels()
if handles:
ax.legend(handles, labels)
plt.tight_layout()
def remove_from_legend(bad_labels):
"""Removes some labels from the legend.
bad_labels: sequence of strings
"""
ax = plt.gca()
handles, labels = ax.get_legend_handles_labels()
handle_list, label_list = [], []
for handle, label in zip(handles, labels):
if label not in bad_labels:
handle_list.append(handle)
label_list.append(label)
ax.legend(handle_list, label_list)
class SettableNamespace(SimpleNamespace):
"""Contains a collection of parameters.
Used to make a System object.
Takes keyword arguments and stores them as attributes.
"""
def __init__(self, namespace=None, **kwargs):
super().__init__()
if namespace:
self.__dict__.update(namespace.__dict__)
self.__dict__.update(kwargs)
def get(self, name, default=None):
"""Look up a variable.
name: string varname
default: value returned if `name` is not present
"""
try:
            # getattr raises AttributeError if `name` is missing, which triggers the default
            return getattr(self, name)
except AttributeError:
return default
def set(self, **variables):
"""Make a copy and update the given variables.
returns: Params
"""
new = copy(self)
new.__dict__.update(variables)
return new
def magnitude(x):
"""Returns the magnitude of a Quantity or number.
x: Quantity or number
returns: number
"""
return x.magnitude if hasattr(x, 'magnitude') else x
def remove_units(namespace):
"""Removes units from the values in a Namespace.
Only removes units from top-level values;
does not traverse nested values.
returns: new Namespace object
"""
res = copy(namespace)
for label, value in res.__dict__.items():
if isinstance(value, pd.Series):
value = remove_units_series(value)
res.__dict__[label] = magnitude(value)
return res
def remove_units_series(series):
"""Removes units from the values in a Series.
Only removes units from top-level values;
does not traverse nested values.
returns: new Series object
"""
res = copy(series)
for label, value in res.iteritems():
res[label] = magnitude(value)
return res
class System(SettableNamespace):
"""Contains system parameters and their values.
Takes keyword arguments and stores them as attributes.
"""
pass
class Params(SettableNamespace):
"""Contains system parameters and their values.
Takes keyword arguments and stores them as attributes.
"""
pass
def State(**variables):
"""Contains the values of state variables."""
return pd.Series(variables, name='state')
def make_series(x, y, **options):
"""Make a Pandas Series.
x: sequence used as the index
y: sequence used as the values
returns: Pandas Series
"""
underride(options, name='values')
if isinstance(y, pd.Series):
y = y.values
series = pd.Series(y, index=x, **options)
series.index.name = 'index'
return series
def TimeSeries(*args, **kwargs):
"""
"""
if args or kwargs:
series = pd.Series(*args, **kwargs)
else:
series = pd.Series([], dtype=np.float64)
series.index.name = 'Time'
if 'name' not in kwargs:
series.name = 'Quantity'
return series
def SweepSeries(*args, **kwargs):
"""
"""
if args or kwargs:
series = pd.Series(*args, **kwargs)
else:
series = pd.Series([], dtype=np.float64)
series.index.name = 'Parameter'
if 'name' not in kwargs:
series.name = 'Metric'
return series
def show(obj):
"""Display a Series or Namespace as a DataFrame."""
if isinstance(obj, pd.Series):
df = pd.DataFrame(obj)
return df
elif hasattr(obj, '__dict__'):
return pd.DataFrame(pd.Series(obj.__dict__),
columns=['value'])
else:
return obj
def TimeFrame(*args, **kwargs):
"""DataFrame that maps from time to State.
"""
underride(kwargs, dtype=float)
return pd.DataFrame(*args, **kwargs)
def SweepFrame(*args, **kwargs):
"""DataFrame that maps from parameter value to SweepSeries.
"""
underride(kwargs, dtype=float)
    return pd.DataFrame(*args, **kwargs)
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from config import DATABASE_URI
from models import StateEntry, Estimate, Base
import numpy as np
import pandas as pd
import us
engine = create_engine(DATABASE_URI, executemany_mode='batch')
Session = sessionmaker(bind=engine)
states = [ f'{state.name}' for state in us.states.STATES ] + ['District of Columbia']
def get_pop_est(state, df):
'''
Function to create population estimates between census years
'''
years_ = range(1960, 2010)
ps = np.array(df.loc[df['state'] == state]['population'])
# Population slope between census data years
ms = np.diff(ps) / 10
# Initial population of decade
cs = ps[:-1]
# Create estimates through matrix operations
ests = np.round((np.arange(0, 10).reshape(-1, 1) * ms + np.ones(10).reshape(-1,1) * cs)).T
ests = [
{'state': state, 'year': year, 'population': int(est)}
for year, est in zip(years_, ests.flatten())
]
return ests
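# Illustrative sketch with hypothetical data (not from the database): given six
# decennial census counts for 1960-2010, get_pop_est returns 50 yearly records
# obtained by interpolating linearly within each decade.
_demo_census = pd.DataFrame({
    'state': ['Demo'] * 6,
    'population': [100, 110, 130, 160, 200, 250],
})
_demo_ests = get_pop_est('Demo', _demo_census)
# len(_demo_ests) == 50 and _demo_ests[0] == {'state': 'Demo', 'year': 1960, 'population': 100}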
def main():
# Extract from database
s = Session()
q = s.query(StateEntry.state, StateEntry.year, StateEntry.population).all()
s.close()
# Transform to DataFrame
pops = [ {'state': v[0], 'year': v[1], 'population': v[2]} for v in q ]
    pops = pd.DataFrame(pops)
import warnings
warnings.filterwarnings("ignore")
import logging
import os
from os.path import join
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.models import load_model, save_model, model_from_json
from keras.utils import multi_gpu_model
from utils import utils
import model_skeleton.featuristic as featuristic
import model_skeleton.malfusion as malfusion
import model_skeleton.echelon as echelon
from keras import optimizers
from trend import activation_trend_identification as ati
import config.settings as cnst
from .train_args import DefaultTrainArguments
from plots.plots import plot_partition_epoch_history
from predict import predict
from predict.predict_args import Predict as pObj, DefaultPredictArguments, QStats
import numpy as np
from sklearn.utils import class_weight
import pandas as pd
from plots.plots import display_probability_chart
from analyzers.collect_exe_files import get_partition_data, partition_pkl_files_by_count, partition_pkl_files_by_size
import gc
from shutil import copyfile
def train(args):
""" Function for training Tier-1 model with whole byte sequence data
Args:
args: An object containing all the required parameters for training
Returns:
history: Returns history object from keras training process
"""
train_steps = len(args.t1_x_train) // args.t1_batch_size
args.t1_train_steps = train_steps - 1 if len(args.t1_x_train) % args.t1_batch_size == 0 else train_steps + 1
if args.t1_x_val is not None:
val_steps = len(args.t1_x_val) // args.t1_batch_size
args.t1_val_steps = val_steps - 1 if len(args.t1_x_val) % args.t1_batch_size == 0 else val_steps + 1
args.t1_ear = EarlyStopping(monitor='acc', patience=3)
args.t1_mcp = ModelCheckpoint(join(args.save_path, args.t1_model_name),
monitor="acc", save_best_only=args.save_best, save_weights_only=False)
data_gen = utils.direct_data_generator(args.t1_x_train, args.t1_y_train)
history = args.t1_model_base.fit(
data_gen,
class_weight=args.t1_class_weights,
steps_per_epoch=args.t1_train_steps,
epochs=args.t1_epochs,
verbose=args.t1_verbose,
callbacks=[args.t1_ear, args.t1_mcp]
# , validation_data=utils.data_generator(args.t1_x_val, args.t1_y_val, args.t1_max_len, args.t1_batch_size,
# args.t1_shuffle) , validation_steps=val_steps
)
# plot_history(history, cnst.TIER1)
return history
def train_by_blocks(args):
""" Function for training Tier-2 model with top activation blocks data
Args:
args: An object containing all the required parameters for training
Returns:
history: Returns history object from keras training process
"""
train_steps = len(args.t2_x_train) // args.t2_batch_size
args.t2_train_steps = train_steps - 1 if len(args.t2_x_train) % args.t2_batch_size == 0 else train_steps + 1
if args.t2_x_val is not None:
val_steps = len(args.t2_x_val) // args.t2_batch_size
args.t2_val_steps = val_steps - 1 if len(args.t2_x_val) % args.t2_batch_size == 0 else val_steps + 1
args.t2_ear = EarlyStopping(monitor='acc', patience=3)
args.t2_mcp = ModelCheckpoint(join(args.save_path, args.t2_model_name),
monitor="acc", save_best_only=args.save_best, save_weights_only=False)
data_gen = utils.data_generator(args.train_partition, args.t2_x_train, args.t2_y_train, args.t2_max_len, args.t2_batch_size, args.t2_shuffle)
history = args.t2_model_base.fit(
data_gen,
class_weight=args.t2_class_weights,
steps_per_epoch=args.t2_train_steps,
epochs=args.t2_epochs,
verbose=args.t2_verbose,
callbacks=[args.t2_ear, args.t2_mcp]
# , validation_data=utils.data_generator_by_section(args.q_sections, args.t2_x_val, args.t2_y_val
# , args.t2_max_len, args.t2_batch_size, args.t2_shuffle)
# , validation_steps=args.val_steps
)
# plot_history(history, cnst.TIER2)
return history
def train_by_section(args):
''' Obsolete: For block-based implementation'''
train_steps = len(args.t2_x_train)//args.t2_batch_size
args.t2_train_steps = train_steps - 1 if len(args.t2_x_train) % args.t2_batch_size == 0 else train_steps + 1
if args.t2_x_val is not None:
val_steps = len(args.t2_x_val) // args.t2_batch_size
args.t2_val_steps = val_steps - 1 if len(args.t2_x_val) % args.t2_batch_size == 0 else val_steps + 1
args.t2_ear = EarlyStopping(monitor='acc', patience=3)
args.t2_mcp = ModelCheckpoint(join(args.save_path, args.t2_model_name),
monitor="acc", save_best_only=args.save_best, save_weights_only=False)
# Check MAX_LEN modification is needed - based on proportion of section vs whole file size
# args.max_len = cnst.MAX_FILE_SIZE_LIMIT + (cnst.CONV_WINDOW_SIZE * len(args.q_sections))
data_gen = utils.direct_data_generator_by_section(args.q_sections, args.t2_x_train, args.t2_y_train)
history = args.t2_model_base.fit(
data_gen,
class_weight=args.t2_class_weights,
steps_per_epoch=len(args.t2_x_train)//args.t2_batch_size + 1,
epochs=args.t2_epochs,
verbose=args.t2_verbose,
callbacks=[args.t2_ear, args.t2_mcp]
# , validation_data=utils.data_generator_by_section(args.q_sections, args.t2_x_val, args.t2_y_val
# , args.t2_max_len, args.t2_batch_size, args.t2_shuffle)
# , validation_steps=args.val_steps
)
# plot_history(history, cnst.TIER2)
return history
def change_model(model, new_input_shape=(None, cnst.TIER2_NEW_INPUT_SHAPE)):
""" Function to transfer weights of pre-trained Malconv to the block based model with reduced input shape.
Args:
model: An object with required parameters/hyper-parameters for loading, configuring and compiling
new_input_shape: a value <= Tier-1 model's input shape. Typically, ( Num of Conv. Filters * Size of Conv. Stride )
Returns:
new_model: new model with reduced input shape and weights updated
"""
model._layers[0].batch_input_shape = new_input_shape
new_model = model_from_json(model.to_json())
for layer in new_model.layers:
try:
layer.set_weights(model.get_layer(name=layer.name).get_weights())
logging.info("Loaded and weights set for layer {}".format(layer.name))
except Exception as e:
logging.exception("Could not transfer weights for layer {}".format(layer.name))
return new_model
def change_hydra(model, ech_model, new_input_shape=(None, cnst.TIER2_NEW_INPUT_SHAPE)):
""" Function to transfer weights of pre-trained Malconv to the block based model with reduced input shape.
Args:
        model: An object with required parameters/hyper-parameters for loading, configuring and compiling
        ech_model: Echelon model skeleton that receives the transferred weights
new_input_shape: a value <= Tier-1 model's input shape. Typically, ( Num of Conv. Filters * Size of Conv. Stride )
Returns:
new_model: new model with reduced input shape and weights updated
"""
model._layers[0].batch_input_shape = new_input_shape
new_model = ech_model # model_from_json(model.to_json())
print("Updating Layer weights")
for layer in new_model.layers:
try:
layer.set_weights(model.get_layer(name=layer.name).get_weights())
logging.info("Loaded and weights set for layer {}".format(layer.name))
except Exception as e:
logging.exception("Could not transfer weights for layer {}".format(layer.name))
return new_model
def get_model1(args):
""" Function to prepare model required for Tier-1's training/prediction.
Args:
args: An object with required parameters/hyper-parameters for loading, configuring and compiling
Returns:
model1: Returns a Tier-1 model
"""
model1 = None
optimizer = optimizers.Adam(lr=0.001) # , beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
if args.resume:
if cnst.USE_PRETRAINED_FOR_TIER1:
logging.info("[ CAUTION ] : Resuming with pretrained model for TIER1 - " + args.pretrained_t1_model_name)
model1 = load_model(args.model_path + args.pretrained_t1_model_name, compile=False)
print("\n\n\nChanging model input ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\n")
logging.info(str(model1.summary()))
model1 = change_model(model1, new_input_shape=(None, cnst.TIER2_NEW_INPUT_SHAPE))
logging.info(str(model1.summary()))
model1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
if cnst.NUM_GPU > 1:
multi_gpu_model1 = multi_gpu_model(model1, gpus=cnst.NUM_GPU)
# multi_gpu_model1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
return multi_gpu_model1
else:
logging.info("[ CAUTION ] : Resuming with old model")
model1 = load_model(args.model_path + args.t1_model_name, compile=False)
model1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
if cnst.NUM_GPU > 1:
multi_gpu_model1 = multi_gpu_model(model1, gpus=cnst.NUM_GPU)
# multi_gpu_model1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=["accuracy"])
return multi_gpu_model1
else:
logging.info("[CAUTION]: Proceeding training with custom model skeleton")
if args.byte:
premodel = load_model(args.model_path + args.pretrained_t1_model_name, compile=False)
echmodel = echelon.model(args.t1_max_len, args.t1_win_size)
            model1 = change_hydra(premodel, echmodel)
elif args.featuristic:
model1 = featuristic.model(args.total_features)
elif args.fusion:
model1 = malfusion.model(args.max_len, args.win_size)
# optimizer = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# param_dict = {'lr': [0.00001, 0.0001, 0.001, 0.1]}
# model_gs = GridSearchCV(model, param_dict, cv=10)
# model1.summary()
return model1
def get_model2(args):
'''Obsolete: For block-based implementation'''
model2 = None
optimizer = optimizers.Adam(lr=0.001)
if args.resume:
if cnst.USE_PRETRAINED_FOR_TIER2:
logging.info("[ CAUTION ] : Resuming with pretrained model for TIER2 - " + args.pretrained_t2_model_name)
model2 = load_model(args.model_path + args.pretrained_t2_model_name, compile=False)
print("\n\n\nChanging model input ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\n")
logging.info(str(model2.summary()))
model2 = change_model(model2, new_input_shape=(None, cnst.TIER2_NEW_INPUT_SHAPE))
logging.info(str(model2.summary()))
model2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=["accuracy"])
if cnst.NUM_GPU > 1:
model2 = multi_gpu_model(model2, gpus=cnst.NUM_GPU)
else:
logging.info("[ CAUTION ] : Resuming with old model")
model2 = load_model(args.model_path + args.t2_model_name, compile=False)
model2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=["accuracy"])
if cnst.NUM_GPU > 1:
model2 = multi_gpu_model(model2, gpus=cnst.NUM_GPU)
else:
# logging.info("*************************** CREATING new model *****************************")
if args.byte:
model2 = echelon.model(args.t2_max_len, args.t2_win_size)
elif args.featuristic:
model2 = featuristic.model(len(args.selected_features))
elif args.fusion:
model2 = malfusion.model(args.max_len, args.win_size)
# optimizer = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
optimizer = optimizers.Adam(lr=0.001) # , beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# model2.summary()
return model2
def get_block_model2(args):
""" Function to prepare model required for Tier-2's training/prediction - For top activation block implementation.
Model's input shape is set to a reduced value specified in TIER2_NEW_INPUT_SHAPE parameter in settings.
Args:
args: An object with required parameters/hyper-parameters for loading, configuring and compiling
Returns:
model2: Returns a Tier-2 model
"""
model2 = None
optimizer = optimizers.Adam(lr=0.001)
if args.resume:
if cnst.USE_PRETRAINED_FOR_TIER2:
logging.info("[ CAUTION ] : Resuming with pretrained model for TIER2 - " + args.pretrained_t2_model_name)
model2 = load_model(args.model_path + args.pretrained_t2_model_name, compile=False)
model2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=["accuracy"])
if cnst.NUM_GPU > 1:
model2 = multi_gpu_model(model2, gpus=cnst.NUM_GPU)
else:
logging.info("[ CAUTION ] : Resuming with old model")
model2 = load_model(args.model_path + args.t1_model_name, compile=False)
logging.info(str(model2.summary()))
model2 = change_model(model2, new_input_shape=(None, cnst.TIER2_NEW_INPUT_SHAPE))
logging.info(str(model2.summary()))
model2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=["accuracy"])
if cnst.NUM_GPU > 1:
model2 = multi_gpu_model(model2, gpus=cnst.NUM_GPU)
else:
# logging.info("*************************** CREATING new model *****************************")
if args.byte:
model2 = echelon.model(args.t2_max_len, args.t2_win_size)
elif args.featuristic:
model2 = featuristic.model(len(args.selected_features))
elif args.fusion:
model2 = malfusion.model(args.max_len, args.win_size)
# optimizer = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
optimizer = optimizers.Adam(lr=0.001) # , beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# model2.summary()
return model2
def train_tier1(args):
# logging.info("************************ TIER 1 TRAINING - STARTED ****************************
# Samples:", len(args.t1_x_train))
if args.tier1:
if args.byte:
return train(args)
# logging.info("************************ TIER 1 TRAINING - ENDED ****************************")
def train_tier2(args):
# logging.info("************************ TIER 2 TRAINING - STARTED ****************************")
if args.tier2:
if args.byte:
return train_by_section(args)
# print("************************ TIER 2 TRAINING - ENDED ****************************")
def evaluate_tier1(args):
""" Function to evaluate the Tier-1 model being trained at the end of each Epoch. (Not after completing each partition !)
Args:
args: An object with evaluation parameters and data.
Returns:
history: Returns a history object with Tier-1 evaluation loss and accuracy
"""
eval_steps = len(args.t1_x_val) // args.t1_batch_size
args.t1_val_steps = eval_steps - 1 if len(args.t1_x_val) % args.t1_batch_size == 0 else eval_steps + 1
history = args.t1_model_base.evaluate_generator(
# utils.train_data_generator(args.val_partition, args.t1_x_val, args.t1_y_val, args.t1_max_len, args.t1_batch_size, args.t1_shuffle),
utils.direct_data_generator(args.t1_x_val, args.t1_y_val),
steps=args.t1_val_steps,
verbose=args.t1_verbose
)
# plot_history(history, cnst.TIER1)
return history
def evaluate_tier2(args):
""" Function to evaluate the Tier-2 model being trained at the end of each Epoch. (Not after completing each partition !)
Args:
args: An object with evaluation parameters and data.
Returns:
history: Returns a history object with Tier-2 evaluation loss and accuracy
"""
eval_steps = len(args.t2_x_val) // args.t2_batch_size
args.t2_val_steps = eval_steps - 1 if len(args.t2_x_val) % args.t2_batch_size == 0 else eval_steps + 1
history = args.t2_model_base.evaluate_generator(
# utils.train_data_generator_by_section(args.spartition, args.q_sections, args.t2_x_val, args.t2_y_val, args.t2_max_len, args.t2_batch_size, args.t2_shuffle),
utils.direct_data_generator_by_section(args.q_sections, args.t2_x_val, args.t2_y_val),
steps=args.t2_val_steps,
verbose=args.t2_verbose
)
# plot_history(history, cnst.TIER2)
return history
def evaluate_tier2_block(args):
""" Function to evaluate the Tier-2 block-based model being trained at the end of each Epoch. (Not after completing each partition !)
Args:
args: An object with evaluation parameters and data.
Returns:
history: Returns a history object with block-model evaluation loss and accuracy
"""
eval_steps = len(args.t2_x_val) // args.t2_batch_size
args.t2_val_steps = eval_steps - 1 if len(args.t2_x_val) % args.t2_batch_size == 0 else eval_steps + 1
history = args.t2_model_base.evaluate_generator(
utils.data_generator(args.val_partition, args.t2_x_val, args.t2_y_val, args.t2_max_len, args.t2_batch_size, args.t2_shuffle),
steps=args.t2_val_steps,
verbose=args.t2_verbose
)
# plot_history(history, cnst.TIER2)
return history
def init(model_idx, train_partitions, val_partitions, fold_index):
""" Module for Training and Validation
# ##################################################################################################################
# OBJECTIVES:
# 1) Train Tier-1 and select its decision threshold for classification using Training data
# 2) Perform ATI over training data and select influential (Qualified) sections to be used by Tier-2
# 3) Train Tier-2 on selected PE sections' top activation blocks
# 4) Save trained models for Tier-1 and Tier-2
# ##################################################################################################################
Args:
model_idx: Default 0 for byte sequence models. Do not change.
train_partitions: list of partition indexes to be used for Training
val_partitions: list of partition indexes to be used for evaluation and validation
fold_index: current fold of cross-validation
Returns:
None (Resultant data are stored in CSV for further use)
"""
t_args = DefaultTrainArguments()
if cnst.EXECUTION_TYPE[model_idx] == cnst.BYTE: t_args.byte = True
elif cnst.EXECUTION_TYPE[model_idx] == cnst.FEATURISTIC: t_args.featuristic = True
elif cnst.EXECUTION_TYPE[model_idx] == cnst.FUSION: t_args.fusion = True
t_args.t1_model_name = cnst.TIER1_MODELS[model_idx] + "_" + str(fold_index) + ".h5"
t_args.t2_model_name = cnst.TIER2_MODELS[model_idx] + "_" + str(fold_index) + ".h5"
t_args.t1_best_model_name = cnst.TIER1_MODELS[model_idx] + "_" + str(fold_index) + "_best.h5"
t_args.t2_best_model_name = cnst.TIER2_MODELS[model_idx] + "_" + str(fold_index) + "_best.h5"
# logging.info("################################## TRAINING TIER-1 ###########################################")
# partition_tracker_df = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "partition_tracker_"+str(fold_index)+".csv")
if not cnst.SKIP_TIER1_TRAINING:
logging.info("************************ TIER 1 TRAINING - STARTED ****************************")
t_args.t1_model_base = get_model1(t_args)
best_val_loss = float('inf')
best_val_acc = 0
epochs_since_best = 0
mean_trn_loss = []
mean_trn_acc = []
mean_val_loss = []
mean_val_acc = []
cwy = []
for tp_idx in train_partitions:
cwdf = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "p" + str(tp_idx) + ".csv", header=None)
cwy = np.concatenate([cwy, cwdf.iloc[:, 1].values])
t_args.t1_class_weights = class_weight.compute_class_weight('balanced', np.unique(cwy), cwy)
for epoch in range(cnst.EPOCHS): # External Partition Purpose
logging.info("[ PARTITION LEVEL TIER-1 EPOCH : %s ]", epoch+1)
cur_trn_loss = []
cur_trn_acc = []
for tp_idx in train_partitions:
logging.info("Training on partition: %s", tp_idx)
tr_datadf = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "p" + str(tp_idx) + ".csv", header=None)
t_args.t1_x_train, t_args.t1_x_val, t_args.t1_y_train, t_args.t1_y_val = tr_datadf.iloc[:, 0].values, None, tr_datadf.iloc[:, 1].values, None
# t_args.t1_class_weights = class_weight.compute_class_weight('balanced',
# np.unique(t_args.t1_y_train), t_args.t1_y_train) # Class Imbalance Tackling - Setting class weights
t_args.train_partition = get_partition_data(None, None, tp_idx, "t1")
t_history = train_tier1(t_args)
cur_trn_loss.append(t_history.history['loss'][0])
cur_trn_acc.append(t_history.history['accuracy'][0])
del t_args.train_partition
gc.collect()
cnst.USE_PRETRAINED_FOR_TIER1 = False
cur_val_loss = []
cur_val_acc = []
# Evaluating after each epoch for early stopping over validation loss
logging.info("Evaluating on validation data . . .")
for vp_idx in val_partitions:
val_datadf = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "p" + str(vp_idx) + ".csv", header=None)
t_args.t1_x_train, t_args.t1_x_val, t_args.t1_y_train, t_args.t1_y_val = None, val_datadf.iloc[:, 0].values, None, val_datadf.iloc[:, 1].values
t_args.val_partition = get_partition_data(None, None, vp_idx, "t1")
v_history = evaluate_tier1(t_args)
cur_val_loss.append(v_history[0])
cur_val_acc.append(v_history[1])
del t_args.val_partition
gc.collect()
mean_trn_loss.append(np.mean(cur_trn_loss))
mean_trn_acc.append(np.mean(cur_trn_acc))
mean_val_loss.append(np.mean(cur_val_loss))
mean_val_acc.append(np.mean(cur_val_acc))
if mean_val_loss[epoch] < best_val_loss:
best_val_loss = mean_val_loss[epoch]
try:
copyfile(join(t_args.save_path, t_args.t1_model_name), join(t_args.save_path, t_args.t1_best_model_name))
except Exception as e:
logging.exception("Saving EPOCH level best model failed for Tier1")
epochs_since_best = 0
logging.info("Current Epoch Loss: %s\tCurrent Epoch Acc: %s\tUpdating best loss: %s", str(mean_val_loss[epoch]).ljust(25), str(mean_val_acc[epoch]).ljust(25), best_val_loss)
else:
logging.info("Current Epoch Loss: %s\tCurrent Epoch Acc: %s", mean_val_loss[epoch], mean_val_acc[epoch])
epochs_since_best += 1
logging.info('{} epochs passed since best val loss of {}'.format(epochs_since_best, best_val_loss))
if cnst.EARLY_STOPPING_PATIENCE_TIER1 <= epochs_since_best:
logging.info('Triggering early stopping as no improvement found since last {} epochs! Best Loss: {}'.format(epochs_since_best, best_val_loss))
try:
copyfile(join(t_args.save_path, t_args.t1_best_model_name), join(t_args.save_path, t_args.t1_model_name))
except Exception as e:
logging.exception("Retrieving EPOCH level best model failed for Tier1")
break
if epoch + 1 == cnst.EPOCHS:
try:
copyfile(join(t_args.save_path, t_args.t1_best_model_name), join(t_args.save_path, t_args.t1_model_name))
except Exception as e:
logging.exception("Retrieving EPOCH level best model failed for Tier1.")
del t_args.t1_model_base
gc.collect()
plot_partition_epoch_history(mean_trn_acc, mean_val_acc, mean_trn_loss, mean_val_loss, "Tier1_F" + str(fold_index+1))
logging.info("************************ TIER 1 TRAINING - ENDED ****************************")
else:
cnst.USE_PRETRAINED_FOR_TIER1 = False # Use model trained through Echelon
logging.info("SKIPPED: Tier-1 Training process")
if cnst.ONLY_TIER1_TRAINING:
return
# TIER-1 PREDICTION OVER TRAINING DATA [Select THD1]
min_boosting_bound = None
max_thd1 = None
b1val_partition_count = 0
if not cnst.SKIP_TIER1_VALIDATION:
logging.info("*** Prediction over Validation data in TIER-1 to select THD1 and Boosting Bound")
pd.DataFrame().to_csv(cnst.PROJECT_BASE_PATH + cnst.ESC + "data" + cnst.ESC + "b1_val_" + str(fold_index) + "_pkl.csv", header=None, index=None)
for vp_idx in val_partitions:
val_datadf = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "p"+str(vp_idx)+".csv", header=None)
predict_t1_val_data = pObj(cnst.TIER1, cnst.TIER1_TARGET_FPR, val_datadf.iloc[:, 0].values, val_datadf.iloc[:, 1].values)
predict_t1_val_data.partition = get_partition_data(None, None, vp_idx, "t1")
predict_t1_val_data = predict.predict_tier1(model_idx, predict_t1_val_data, fold_index)
predict_t1_val_data = predict.select_thd_get_metrics_bfn_mfp(cnst.TIER1, predict_t1_val_data)
min_boosting_bound = predict_t1_val_data.boosting_upper_bound if min_boosting_bound is None or predict_t1_val_data.boosting_upper_bound < min_boosting_bound else min_boosting_bound
max_thd1 = predict_t1_val_data.thd if max_thd1 is None or predict_t1_val_data.thd > max_thd1 else max_thd1
del predict_t1_val_data.partition # Release Memory
gc.collect()
val_b1datadf = pd.concat([pd.DataFrame(predict_t1_val_data.xB1), pd.DataFrame(predict_t1_val_data.yB1)], axis=1)
val_b1datadf.to_csv(cnst.PROJECT_BASE_PATH + cnst.ESC + "data" + cnst.ESC + "b1_val_"+str(fold_index)+"_pkl.csv", header=None, index=None, mode='a')
val_b1datadf = pd.read_csv(cnst.PROJECT_BASE_PATH + cnst.ESC + "data" + cnst.ESC + "b1_val_"+str(fold_index)+"_pkl.csv", header=None)
b1val_partition_count = partition_pkl_files_by_count("b1_val", fold_index, val_b1datadf.iloc[:, 0], val_b1datadf.iloc[:, 1]) if cnst.PARTITION_BY_COUNT else partition_pkl_files_by_size("b1_val", fold_index, val_b1datadf.iloc[:, 0], val_b1datadf.iloc[:, 1])
pd.DataFrame([{"b1_train": None, "b1_val": b1val_partition_count, "b1_test": None}]).to_csv(os.path.join(cnst.DATA_SOURCE_PATH, "b1_partition_tracker_" + str(fold_index) + ".csv"), index=False)
pd.DataFrame([{"thd1": max_thd1, "thd2": None, "boosting_bound": min_boosting_bound}]).to_csv(os.path.join(cnst.PROJECT_BASE_PATH + cnst.ESC + "out" + cnst.ESC + "result" + cnst.ESC, "training_outcomes_" + str(fold_index) + ".csv"), index=False)
else:
logging.info("SKIPPED: Prediction over Validation data in TIER-1 to select THD1 and Boosting Bound")
tier1_val_outcomes = pd.read_csv(os.path.join(cnst.PROJECT_BASE_PATH + cnst.ESC + "out" + cnst.ESC + "result" + cnst.ESC, "training_outcomes_" + str(fold_index) + ".csv"))
max_val_thd1 = tier1_val_outcomes["thd1"][0]
min_val_boosting_bound = tier1_val_outcomes["boosting_bound"][0]
if not cnst.SKIP_TIER1_TRAINING_PRED:
logging.info("*** Prediction over Training data in TIER-1 to generate B1 data for TIER-2 Training")
pd.DataFrame().to_csv(cnst.PROJECT_BASE_PATH + cnst.ESC + "data" + cnst.ESC + "b1_train_" + str(fold_index) + "_pkl.csv", header=None, index=None)
for tp_idx in train_partitions:
tr_datadf = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "p" + str(tp_idx) + ".csv", header=None)
predict_t1_train_data = pObj(cnst.TIER1, cnst.TIER1_TARGET_FPR, tr_datadf.iloc[:, 0].values, tr_datadf.iloc[:, 1].values)
predict_t1_train_data.thd = max_val_thd1
predict_t1_train_data.boosting_upper_bound = min_val_boosting_bound
predict_t1_train_data.partition = get_partition_data(None, None, tp_idx, "t1")
predict_t1_train_data = predict.predict_tier1(model_idx, predict_t1_train_data, fold_index)
predict_t1_train_data = predict.select_thd_get_metrics_bfn_mfp(cnst.TIER1, predict_t1_train_data)
del predict_t1_train_data.partition # Release Memory
gc.collect()
            train_b1data_partition_df = pd.concat([pd.DataFrame(predict_t1_train_data.xB1),
                                                   pd.DataFrame(predict_t1_train_data.yB1)], axis=1)
# Spectral_Analysis_Amp_and_Phase.py
import os
import numpy as np
import pandas as pd
import scipy.linalg as la
import matplotlib.pyplot as plt
# Import time from the data or define it
t = np.arange(0.015, 0.021, 10**-7)
dt = 10**-7
# Define trainsize and number of modes
trainsize = 20000 # Number of snapshots used as training data.
num_modes = 44 # Number of POD modes.
reg = 0 # Just an input in case we regularize DMDc.
# Locate the full data of snapshots FOM and ROMs (INPUT)
Folder_name_data = 'C:\\Users\\Admin\\Desktop\\combustion\\'
file_name_FOM = 'traces_gems_60k_final.npy'
file_name_ROM_DMDc = 'traces_rom_DMDc_rsvd.npy'
file_name_ROM_cubic_r25 = 'traces_rom_cubic_tripple_reg_r25.npy'
file_name_ROM_cubic_r44 = 'traces_rom_cubic_r44.npy'
file_name_ROM_Quad_r44 = 'traces_rom_60k_100_30000.npy'
# Define output file location and file names to identify phase and amplitudes (OUTPUT)
folder_name = "C:\\Users\\Admin\\Desktop\\combustion\\spectral\\Final_plots\\"
Amp_name = folder_name + "\\" + "Amp" # Amplitude plots
Phase_name = folder_name + "\\" + "Phase" # Phase plots
# Load the data
FOM_ = np.load(Folder_name_data + file_name_FOM)
ROM_DMDc = np.load(Folder_name_data + file_name_ROM_DMDc)
ROM_cubic_r25 = np.load(Folder_name_data + file_name_ROM_cubic_r25)
ROM_cubic_r44 = np.load(Folder_name_data + file_name_ROM_cubic_r44)
ROM_Quad_r44 = np.load(Folder_name_data + file_name_ROM_Quad_r44)
# Plotting adjustments
End_plot_at = 60000 # 59990 # 40000
freq_limit_to_plot = 15000
# =============================================================================
def lineplots_timeseries(FOM_,
ROM_Quad_r44, ROM_cubic_r25, ROM_cubic_r44, ROM_DMDc,
datanumber, unit, savefile):
"""Plots for comparision of data in time. Check the saved data in
folder_name.
Parameters
----------
FOM_
Full order model data input
ROM_Quad_r44
Q-OPINF at r = 44
ROM_cubic_r25
C-OPINF at r = 25
ROM_cubic_r44
C-OPINF at r = 44
ROM_DMDc
DMDc results
datanumber
Defines the state parameter
* -12 = Pressure
* -8 = Vx
* -4 = Vy
* 0 = Temperature
* 8 = [CH4]
* 12 = [O2]
* 16 = [H2O]
* 20 = [CO2]
unit
Unit for each variable (Pa, Kelvin...)
savefile
Suffix to save the file name
"""
print("Time series plots")
plt.xlim([0.015, 0.021]) # set axis limits
plt.plot(t[0:End_plot_at],
pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at],
label='FOM', linestyle='solid', c='k')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_Quad_r44).loc[T_st + datanumber][0:End_plot_at],
label='Q-OPINF', linestyle='dashed', c='#ff7f0e')
# plt.plot(t[0:End_plot_at],
# pd.DataFrame(ROM_cubic_r25).loc[T_st + datanumber][0:End_plot_at],
# label='C-OPINF_r25', linestyle='dashed')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_cubic_r44).loc[T_st + datanumber][0:End_plot_at],
label='C-OPINF', linestyle='dashed', c='b')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_DMDc).loc[T_st + datanumber][0:End_plot_at],
label='DMDc', linestyle='dashdot', c='r')
plt.xlabel('time')
plt.ylabel(unit)
plt.axvline(x=t[0] + trainsize*dt, color='black')
plt.legend()
fname = f"{T_st}_ts_{trainsize}_r_{num_modes}_reg_{reg}{savefile}.pdf"
plt.savefig(os.path.join(folder_name, fname),
bbox_inches="tight", dpi=200)
plt.show()
def L2errorplots(FOM_, ROM_Quad_r44, ROM_cubic_r25, ROM_cubic_r44, ROM_DMDc,
datanumber, unit):
"""Plot L2 norm error comparision between all the ROMs.
Parameters
----------
FOM_
Full order model data input
ROM_Quad_r44
Q-OPINF at r = 44
ROM_cubic_r25
C-OPINF at r = 25
ROM_cubic_r44
C-OPINF at r = 44
ROM_DMDc
DMDc results
datanumber
Defines the state parameter
* -12 = Pressure
* -8 = Vx
* -4 = Vy
* 0 = Temperature
* 8 = [CH4]
* 12 = [O2]
* 16 = [H2O]
* 20 = [CO2]
unit
Unit for each variable (Pa, Kelvin...)
"""
print("L2 norm error plot")
e_ROM_Quad_r44 = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_Quad_r44).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
e_ROM_cubic_r25 = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_cubic_r25).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
e_ROM_cubic_r44 = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_cubic_r44).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
    e_ROM_DMDc = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_DMDc).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
#its called from FeatureStore before generating features we get results from this analysis and decide features.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
from wordcloud import WordCloud
import nltk
from nltk import word_tokenize, sent_tokenize
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk.corpus import stopwords
from nltk.corpus import wordnet
from gensim import corpora, models
from gensim.models import TfidfModel
import gensim
nltk.download('wordnet')
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
import sys
import os
from datetime import datetime
from utils.newspipeline import NewsPipeline
from TopicExtractor.src.utils.mlflow.metadatastore import *
from TopicExtractor.src.utils.pipelineconfig import PipelineConfig
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
DATA_PATH = os.path.join(BASE_DIR,'data')
class DataAnalysis(NewsPipeline):
def __init__(self):
print('DataAnalysis instantiated')
self.__wordCloudfilename = 'WordCloud.png'
super().__init__()
@mlflowtimed
def _process(self,df_articles):
try:
self.__config = PipelineConfig.getPipelineConfig(self)
if self.__config['Enable']:
                df = pd.DataFrame(df_articles)
import requests
import pandas as pd
import json
def load_data():
stations = [{'name': '<NAME>' , 'id': 97280}, {'name': '<NAME>' , 'id': 97100}]
    df = pd.DataFrame()
import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
### plot and layout
import pandas as pd
import altair as alt
import geopandas as gpd
from si_prefix import si_format
data = pd.read_csv("./data/processed/cleaned_salaries.csv")
scale_plots = 0.8
def plot_salary_heatmap(xmax, xcon):
source = data.copy()
source = source[(source["Age"] > 0) & (source["Salary_USD"] <= xmax[1]) & (source["Salary_USD"] >= xmax[0])]
if xcon is not None:
source = source[source["Country"] == xcon]
else:
xcon = "the World"
x_bin_num = max(int((source.shape[0] / 6) ** 0.65), 6)
y_bin_num = max(int((source.shape[0] / 6) ** 0.65), 6)
chart = (
alt.Chart(source)
.mark_rect()
.encode(
x=alt.X("Age:Q", bin=alt.Bin(maxbins=x_bin_num), title=None),
y=alt.Y(
"Salary_USD:Q",
bin=alt.Bin(maxbins=y_bin_num),
title="Salary in USD",
axis=alt.Axis(format="~s"),
),
tooltip="count()",
color=alt.Color(
"count()",
scale=alt.Scale(scheme="greenblue"),
legend=alt.Legend(title="Counts"),
),
)
.properties(
title=f"Heatmap of {xcon}",
width=scale_plots * 280,
height=scale_plots * 200,
)
)
bar = (
alt.Chart(source)
.mark_bar()
.encode(
x=alt.X("Age:Q"),
y=alt.Y("count()", title="Counts"),
)
.properties(
width=scale_plots * 280,
height=scale_plots * 130,
)
)
fchart = alt.vconcat(chart, bar, spacing=0)
return fchart.to_html()
def plot_gender_boxplot(xmax, xcon):
source = data.copy()
source = source[(source["Age"] > 0) & (source["Salary_USD"] <= xmax[1]) & (source["Salary_USD"] >= xmax[0])]
source = source.dropna(subset=["GenderSelect"])
source["GenderSelect"] = source["GenderSelect"].replace(
"Non-binary, genderqueer, or gender non-conforming", "A different identity"
)
if xcon is not None:
source = source[source["Country"] == xcon]
else:
xcon = "the World"
chart = (
alt.Chart(source)
.mark_boxplot()
.encode(
x=alt.X(
"Salary_USD:Q",
title="Salary in USD",
axis=alt.Axis(format="~s"),
scale=alt.Scale(zero=False),
),
y=alt.Y("GenderSelect", title="Gender"),
tooltip="count()",
color=alt.Color("GenderSelect", title="Gender"),
)
.configure_legend(orient="bottom")
.properties(
title=f"Boxplot by gender in {xcon}",
width=scale_plots * 420,
height=scale_plots * 120,
)
.interactive()
)
return chart.to_html()
def plot_edu_histo(xmax, xcon, stack):
education_order = [
"Less than bachelor's degree",
"Bachelor's degree",
"Master's degree",
"Doctoral degree",
]
remote_order = ["Always", "Most of the time", "Sometimes", "Rarely", "Never"]
source = data.copy()
source = source[(source["Age"] > 0) & (source["Salary_USD"] <= xmax[1]) & (source["Salary_USD"] >= xmax[0])]
if xcon is not None:
source = source.query("Country == @xcon")
else:
xcon = "the World"
if stack == "FormalEducation":
for idx, i in enumerate(source["FormalEducation"]):
if i in education_order[1:]:
continue
else:
source["FormalEducation"].iloc[idx] = "Less than bachelor's degree"
else:
for idx, i in enumerate(source["RemoteWork"]):
if i in remote_order[1:]:
continue
else:
source["RemoteWork"].iloc[idx] = "No data"
if stack == "FormalEducation":
chart = (
alt.Chart(source)
.mark_bar()
.encode(
x=alt.X(
"Salary_USD",
axis=alt.Axis(format="~s"),
bin=alt.Bin(maxbins=20),
title="Salary in USD",
),
y=alt.Y("count()", title="Counts"),
color=alt.Color(
"FormalEducation",
sort=education_order,
title="Education level",
legend=alt.Legend(columns=2),
),
order=alt.Order("education_order:Q"),
)
.configure_legend(orient="bottom", titleFontSize=11, labelFontSize=11)
.properties(
title=f"Histogram of {xcon}",
width=scale_plots * 300,
height=scale_plots * 120,
)
.configure_axis(labelFontSize=12)
)
else:
chart = (
alt.Chart(source)
.mark_bar()
.encode(
x=alt.X(
"Salary_USD",
axis=alt.Axis(format="~s"),
bin=alt.Bin(maxbins=20),
title="Salary in USD",
),
y=alt.Y("count()", title="Counts"),
color=alt.Color(
"RemoteWork",
sort=remote_order,
title="Remote working",
legend=alt.Legend(columns=3),
),
order=alt.Order("remote_order:Q"),
)
.configure_legend(orient="bottom", titleFontSize=11, labelFontSize=11)
.properties(
title=f"Histogram of {xcon}",
width=scale_plots * 300,
height=scale_plots * 120,
)
.configure_axis(labelFontSize=12)
)
return chart.to_html()
def plot_map(xcon):
world = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres"))
world["name"] = world["name"].apply(
lambda x: str.lower(" ".join(x.split(" ")[0:2]))
)
world = world.loc[world["name"] != "antarctica"]
world.rename({"name": "Country"}, axis=1, inplace=True)
source = data.copy()
source = source[["Country", "Salary_USD"]].groupby("Country").median().reset_index()
source["Country"] = source["Country"].apply(lambda x: str.lower(x))
    datamap = pd.merge(world, source, how="left")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
import sqlite3
from pathlib import Path
from os import getenv
import numpy as np
import pandas as pd
from pandas_datareader.nasdaq_trader import get_nasdaq_symbols
np.random.seed(42)
idx = pd.IndexSlice
PROJECT_DIR = Path('..', '..')
data_path = PROJECT_DIR / 'data' / 'nasdaq100'
ZIPLINE_ROOT = getenv('ZIPLINE_ROOT')
if not ZIPLINE_ROOT:
quandl_path = Path('~', '.zipline', 'data', 'quandl').expanduser()
else:
quandl_path = Path(ZIPLINE_ROOT, 'data', 'quandl')
downloads = sorted([f.name for f in quandl_path.iterdir() if f.is_dir()])
if not downloads:
print('Need to run "zipline ingest" first')
exit()
download_timestamp = downloads[-1]
adj_db_path = quandl_path / 'adjustments.sqlite'
equities_db_path = quandl_path / 'assets-7.sqlite'
def read_sqlite(table, con):
return pd.read_sql("SELECT * FROM " + table, con=con).dropna(how='all', axis=1)
def get_equities():
nasdaq100 = pd.read_hdf(data_path / 'data.h5', '1min_trades')
equities_con = sqlite3.connect(equities_db_path.as_posix())
equities = read_sqlite('equity_symbol_mappings', equities_con)
all_tickers = nasdaq100.index.get_level_values('ticker').unique()
tickers_with_meta = np.sort(all_tickers.intersection(pd.Index(equities.symbol)))
nasdaq_info = (get_nasdaq_symbols()
.reset_index()
.rename(columns=lambda x: x.lower().replace(' ', '_'))
.loc[:, ['symbol', 'security_name']]
.rename(columns={'security_name': 'asset_name'}))
nasdaq_tickers = pd.DataFrame({'symbol': tickers_with_meta}).merge(nasdaq_info, how='left')
nasdaq_sids = (equities.loc[equities.symbol.isin(nasdaq_tickers.symbol),
['symbol', 'sid']])
nasdaq_tickers = (nasdaq_tickers.merge(nasdaq_sids, how='left')
.reset_index()
.rename(columns={'sid': 'quandl_sid', 'index': 'sid'}))
nasdaq_tickers.to_hdf('algoseek.h5', 'equities')
def get_dividends():
equities = pd.read_hdf('algoseek.h5', 'equities')
adjustments_con = sqlite3.connect(adj_db_path.as_posix())
div_cols = ['sid', 'ex_date', 'declared_date', 'pay_date', 'record_date', 'amount']
dividends = read_sqlite('dividend_payouts', adjustments_con)[['sid', 'ex_date', 'amount']]
dividends = (dividends.rename(columns={'sid': 'quandl_sid'})
.merge(equities[['quandl_sid', 'sid']])
.drop('quandl_sid', axis=1))
print(dividends.loc[:, div_cols].info())
dividends.reindex(div_cols, axis=1).to_hdf('algoseek.h5', 'dividends')
def get_splits():
split_cols = ['sid', 'effective_date', 'ratio']
equities = pd.read_hdf('algoseek.h5', 'equities')
adjustments_con = sqlite3.connect(adj_db_path.as_posix())
splits = read_sqlite('splits', adjustments_con)[split_cols]
splits = (splits.rename(columns={'sid': 'quandl_sid'})
.merge(equities[['quandl_sid', 'sid']])
.drop('quandl_sid', axis=1)
)
print(splits.loc[:, split_cols].info())
splits.loc[:, split_cols].to_hdf('algoseek.h5', 'splits')
def get_ohlcv_by_ticker():
    equities = pd.read_hdf('algoseek.h5', 'equities')
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import statsmodels
from matplotlib import pyplot
from scipy import stats
import statsmodels.api as sm
import warnings
from itertools import product
import datetime as dt
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import kpss
from pandas import DataFrame
from pandas import concat
from pandas import Series
from math import sqrt
from sklearn.metrics import mean_squared_error
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
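# Illustrative sketch with hypothetical values: frame two variables with one
# lag. The columns become var1(t-1), var2(t-1), var1(t), var2(t), and the
# first row is dropped because its lagged values are NaN.
_demo_frame = DataFrame({'a': [1, 2, 3], 'b': [10, 20, 30]})
print(series_to_supervised(_demo_frame.values, n_in=1, n_out=1))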
data = pd.read_csv('Data/All_Merged.csv') # , parse_dates=[0], date_parser=dateparse
data.isna().sum()
# Inserting 0 for NA
data.fillna(0, inplace=True)
# plt.figure(figsize=[10,4])
# plt.title('BTC Price (USD) Daily')
# plt.plot(data.price, '-', label='Daily')
# Monthly
data['date'] = pd.to_datetime(data['date'])
data['date'] = data['date'].dt.tz_localize(None)
data = data.groupby([pd.Grouper(key='date', freq='M')]).first().reset_index()
data = data.set_index('date')
data['price'].fillna(method='ffill', inplace=True)
# Decomposition - only for price though!
# decomposition = sm.tsa.seasonal_decompose(data.price)
#
# trend = decomposition.trend
# seasonal = decomposition.seasonal
# residual = decomposition.resid
#
# fig = plt.figure(figsize=(10,8))
#
# plt.subplot(411)
# plt.plot(data.price, label='Original')
# plt.legend(loc='best')
# plt.subplot(412)
# plt.plot(trend, label='Trend')
# plt.legend(loc='best')
# plt.subplot(413)
# plt.plot(seasonal,label='Seasonality')
# plt.legend(loc='best')
# plt.subplot(414)
# plt.plot(residual, label='Residuals')
# plt.legend(loc='best')
#
# fig.suptitle('Decomposition of Prices Data')
# plt.show()
# Setting the data structure
reframed = series_to_supervised(data, 1, 1)
# Also removing the lagged price, as this will be created in the ARIMA
reframed.drop(reframed.columns[[0,8, 9, 10, 11, 12, 13]], axis=1, inplace=True)
print(reframed.head())
# split data
split_date = '2018-06-25'
reframed_train = reframed.loc[reframed.index <= split_date].copy()
reframed_test = reframed.loc[reframed.index > split_date].copy()
# Trying ARIMA on the original data
# This is a seasonal ARIMA (SARIMA), so it should give an extra result beyond a regular ARIMA
# Help with the commentary can be found here: https://machinelearningmastery.com/sarima-for-time-series-forecasting-in-python/
# It fits fine if you don't split into train and test..
# Initial approximation of parameters
Qs = range(0, 2)
qs = range(0, 3)
Ps = range(0, 3)
ps = range(0, 3)
D=1
d=1
parameters = product(ps, qs, Ps, Qs)
parameters_list = list(parameters)
len(parameters_list)
x_train = reframed_train.iloc[:,:-1].values
y_train = reframed_train.iloc[:,-1]
x_test = reframed_test.iloc[:,:-1].values
y_test = reframed_test.iloc[:,-1]
# Model Selection
results = []
best_aic = float("inf")
warnings.filterwarnings('ignore')
for param in parameters_list:
try:
model=sm.tsa.statespace.SARIMAX(endog=y_train, exog=x_train, order=(param[0], d, param[1]),
seasonal_order=(param[2], D, param[3], 12),enforce_stationarity=True,
enforce_invertibility=True).fit(disp=-1)
except ValueError:
print('wrong parameters:', param)
continue
aic = model.aic
if aic < best_aic:
best_model = model
best_aic = aic
best_param = param
results.append([param, model.aic])
# Best Models
result_table = pd.DataFrame(results)
result_table.columns = ['parameters', 'aic']
print(result_table.sort_values(by = 'aic', ascending=True).head())
print(best_model.summary())
# Residual plot of the best model
fig = plt.figure(figsize=(10,4))
best_model.resid.plot()
fig.suptitle('Residual Plot of the Best Model')
print("Dickey–Fuller test:: p=%f" % sm.tsa.stattools.adfuller(best_model.resid)[1])
# Dickey–Fuller test:: p=0.xxx -> Residuals are stationary
df_month2 = data[['price']]
future = pd.DataFrame()
df_month2 = pd.concat([df_month2, future])
df_month2['forecast'] = best_model.predict(start = len(x_train), end = len(x_train)+len(x_test)-1, exog=x_test)
plt.figure(figsize=(8,4))
df_month2.price.plot()
df_month2.forecast.plot(color='r', ls='--', label='Predicted Price')
plt.legend()
plt.title('Bitcoin Prices (USD) Predicted vs Actuals, by months')
plt.ylabel('mean USD')
plt.show()
# Daily version
df = pd.read_csv('Data/All_Merged.csv')
df.isna().sum()
# Inserting 0 for NA
df.fillna(0, inplace=True)
# Date type
df['date'] = pd.to_datetime(df['date'])
#!/usr/bin/env python
# coding: utf-8
# # Benchmark Results
# This notebook visualizes the output from the different models on different classification problems
# In[1]:
import collections
import glob
import json
import os
import numpy as np
import pandas as pd
from plotnine import *
from saged.utils import split_sample_names, create_dataset_stat_df, get_dataset_stats, parse_map_file
# ## Set Up Functions and Get Metadata
# In[3]:
def return_unlabeled():
# For use in a defaultdict
return 'unlabeled'
# In[4]:
data_dir = '../../data/'
map_file = os.path.join(data_dir, 'sample_classifications.pkl')
sample_to_label = parse_map_file(map_file)
sample_to_label = collections.defaultdict(return_unlabeled, sample_to_label)
# In[ ]:
metadata_path = os.path.join(data_dir, 'aggregated_metadata.json')
metadata = None
with open(metadata_path) as json_file:
metadata = json.load(json_file)
sample_metadata = metadata['samples']
# In[ ]:
experiments = metadata['experiments']
sample_to_study = {}
for study in experiments:
for accession in experiments[study]['sample_accession_codes']:
sample_to_study[accession] = study
# ## Sepsis classification
# In[8]:
in_files = glob.glob('../../results/single_label.*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[9]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics
# In[10]:
plot = ggplot(sepsis_metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('PCA vs untransformed data for classifying sepsis')
print(plot)
# In[11]:
plot = ggplot(sepsis_metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_jitter(size=3)
plot += ggtitle('PCA vs untransformed data for classifying sepsis')
print(plot)
# ## All labels
# In[12]:
in_files = glob.glob('../../results/all_labels.*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[13]:
metrics = None
for path in in_files:
if metrics is None:
metrics = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all_labels.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
metrics['unsupervised'] = unsupervised_model
metrics['supervised'] = supervised_model
else:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all_labels.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
metrics = pd.concat([metrics, new_df])
metrics
# In[14]:
plot = ggplot(metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('PCA vs untransformed data for all label classification')
print(plot)
# In[15]:
plot = ggplot(metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_jitter(size=2)
plot += ggtitle('PCA vs untransformed data for all label classification')
print(plot)
# # Subsets of healthy labels
# In[16]:
in_files = glob.glob('../../results/subset_label.sepsis*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[17]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
sepsis_metrics
# In[18]:
print(sepsis_metrics[sepsis_metrics['healthy_used'] == 1])
# In[19]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', ))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[20]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[21]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Same analysis, but with tb instead of sepsis
# In[22]:
in_files = glob.glob('../../results/subset_label.tb*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[23]:
tuberculosis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
tuberculosis_metrics = pd.concat([tuberculosis_metrics, new_df])
tuberculosis_metrics = tuberculosis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
tuberculosis_metrics['healthy_used'] = tuberculosis_metrics['healthy_used'].round(1)
tuberculosis_metrics
# In[24]:
print(tuberculosis_metrics[tuberculosis_metrics['healthy_used'] == 1])
# In[25]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[26]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[27]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Supervised Results Only
# The results above show that unsupervised learning mostly hurts performance rather than helping.
# The visualizations below compare each model based only on its supervised results.
# In[28]:
supervised_sepsis = sepsis_metrics[sepsis_metrics['unsupervised'] == 'untransformed']
# In[29]:
plot = ggplot(supervised_sepsis, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[30]:
supervised_tb = tuberculosis_metrics[tuberculosis_metrics['unsupervised'] == 'untransformed']
# In[31]:
plot = ggplot(supervised_tb, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[32]:
plot = ggplot(supervised_tb, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Batch Effect Correction
# In[33]:
in_files = glob.glob('../../results/subset_label.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[34]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
print(model_info)
model_info = model_info.split('.')
print(model_info)
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
sepsis_metrics
# In[35]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', ))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[36]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## TB Batch effect corrected
# In[37]:
in_files = glob.glob('../../results/subset_label.tb*be_corrected.tsv')
print(in_files[:5])
# In[38]:
tuberculosis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
tuberculosis_metrics = pd.concat([tuberculosis_metrics, new_df])
tuberculosis_metrics = tuberculosis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
tuberculosis_metrics['healthy_used'] = tuberculosis_metrics['healthy_used'].round(1)
tuberculosis_metrics
# In[39]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[40]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Better Metrics, Same Label Distribution in Train and Val sets
# In[11]:
in_files = glob.glob('../../results/keep_ratios.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[12]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of data used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[13]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[14]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[15]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='balanced_accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[16]:
sepsis_stat_df = create_dataset_stat_df(sepsis_metrics,
sample_to_study,
sample_metadata,
sample_to_label,
'sepsis')
sepsis_stat_df.tail(5)
# In[17]:
ggplot(sepsis_stat_df, aes(x='train_val_diff',
y='balanced_accuracy',
color='val_disease_count')) + geom_point() + facet_grid('model ~ .')
# In[18]:
plot = ggplot(sepsis_metrics, aes(x='train sample count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
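# dashed reference line at 0.5: chance-level balanced accuracy (assuming a binary sepsis / healthy split)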
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Effect of All Sepsis Data')
plot
# ## Same Distribution Tuberculosis
# In[19]:
in_files = glob.glob('../../results/keep_ratios.tb*be_corrected.tsv')
print(in_files[:5])
# In[20]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics = tb_metrics.rename({'fraction of data used': 'healthy_used'}, axis='columns')
tb_metrics['healthy_used'] = tb_metrics['healthy_used'].round(1)
tb_metrics
# In[21]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[22]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[23]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='balanced_accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[24]:
tb_stat_df = create_dataset_stat_df(tb_metrics,
sample_to_study,
sample_metadata,
sample_to_label,
'tb')
tb_stat_df.tail(5)
# In[55]:
ggplot(tb_stat_df, aes(x='train_val_diff',
y='balanced_accuracy',
color='val_disease_count')) + geom_point() + facet_grid('model ~ .')
# In[25]:
plot = ggplot(tb_metrics, aes(x='train sample count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot
# ## Results from Small Datasets
# In[57]:
in_files = glob.glob('../../results/small_subsets.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[58]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics['train_count'] = sepsis_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[59]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size Effects (equal label counts)')
print(plot)
# In[60]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size by Model (equal label counts)')
print(plot)
# In[61]:
plot = ggplot(sepsis_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Sepsis Crossover Point')
plot
# ## Small Training Set TB
# In[62]:
in_files = glob.glob('../../results/small_subsets.tb*be_corrected.tsv')
print(in_files[:5])
# In[63]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics['train_count'] = tb_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
tb_metrics = tb_metrics[~(tb_metrics['supervised'] == 'deep_net')]
tb_metrics['supervised'] = tb_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tb_metrics
# In[64]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('TB Dataset Size Effects (equal label counts)')
print(plot)
# In[65]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('TB Dataset Size vs Models (equal label counts)')
print(plot)
# In[66]:
plot = ggplot(tb_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth(method='loess')
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('TB (lack of a) Crossover Point')
plot
# ## Small training sets without batch effect correction
# In[67]:
in_files = glob.glob('../../results/small_subsets.sepsis*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[68]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics['train_count'] = sepsis_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[69]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size Effects (equal label counts)')
print(plot)
# In[70]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size by Model (equal label counts)')
print(plot)
# In[71]:
plot = ggplot(sepsis_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Sepsis Crossover Point')
plot
# ## TB Not Batch Effect Corrected
# In[72]:
in_files = glob.glob('../../results/small_subsets.tb*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[73]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics['train_count'] = tb_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
tb_metrics = tb_metrics[~(tb_metrics['supervised'] == 'deep_net')]
tb_metrics['supervised'] = tb_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tb_metrics
# In[74]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('TB Dataset Size Effects (equal label counts)')
print(plot)
# In[75]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('TB Dataset Size by Model (equal label counts)')
print(plot)
# In[76]:
plot = ggplot(tb_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('TB Crossover Point')
plot
# ## Large training sets without batch effect correction
# In[6]:
in_files = glob.glob('../../results/keep_ratios.sepsis*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[9]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df =
|
pd.read_csv(path, sep='\t')
|
pandas.read_csv
|
'''
fetch servant list and noble phantasm time from fgo atwiki
'''
import pandas
import json
def fetch_ordered_servants():
'''
fetch servant list
'''
url = 'https://w.atwiki.jp/f_go/pages/713.html'
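    # pandas.read_html returns a list of DataFrames for every table whose text matches `match`;
    # the first hit is assumed to be the servant list table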
dfs = pandas.read_html(url, match='マシュ・キリエライト', header=0, index_col=0)
if dfs:
servants = dfs[0]
else:
raise IOError('Servants table not found')
servants.drop(index='No.', inplace=True)
servants.rename(columns=str.lower, inplace=True)
servants.columns.name = 'number'
    np_split = servants['宝具'].str.extract('(?P<range>.*)(?P<color>[ABQABQ])')
    servants = pandas.concat([servants, np_split], axis=1)
    # normalize full-width card letters to ASCII
    servants.replace({'color': {'A': 'A', 'B': 'B', 'Q': 'Q'}}, inplace=True)
return servants
def fetch_hidden_status():
'''
fetch hidden status including noble phantasm time
'''
url = 'https://w.atwiki.jp/f_go/pages/304.html'
dfs = pandas.read_html(url, match='宝具長さ', header=(0, 1), index_col=0)
if dfs:
servants = dfs[1]
else:
raise IOError('Hidden status table not found')
servants.drop(index='No', inplace=True)
servants.rename(columns=str.lower, inplace=True)
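    # flatten the two-level header into single names; dict.fromkeys deduplicates repeated
    # level values while preserving order before joining with '_'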
servants.columns = ('_'.join(dict.fromkeys(c)) for c in servants.columns)
servants.columns.name = 'number'
servants = servants[~servants.index.duplicated(keep='first')]
    np_times = servants['宝具長さ_倍速'].str.extractall(r'(?P<time>\d{1,2}\.\d)(?:[@@](?P<cond>.\D?))?')
min_times = np_times.astype({'time': 'float'}).sort_values(by='time').groupby(level=0).head(1).sort_index(axis=0)
min_times.reset_index(level=1, inplace=True)
servants =
|
pandas.concat([servants, min_times], axis=1)
|
pandas.concat
|
# -*- coding: utf-8 -*-
import time
import numpy as np
import pandas as pd
from sklearn import metrics
from dramkit.optimizer.ga import ga
from dramkit.optimizer.cs import cs
from dramkit.optimizer.pso import pso
from dramkit.optimizer.gwo import gwo
from dramkit.optimizer.woa import woa
from dramkit.optimizer.hho import hho
from dramkit.optimizer.utils_heuristic import FuncOpterInfo
from dramkit import plot_series
from dramkit import simple_logger, close_log_file
from dramkit.logtools.logger_general import get_logger
import matplotlib as mpl
mpl.rcParams['font.sans-serif'] = ['SimHei']
mpl.rcParams['font.serif'] = ['SimHei']
mpl.rcParams['axes.unicode_minus'] = False
import matplotlib.pyplot as plt
#%%
def LR_cls_bin_objf(W_b, X_train=None, y_train=None, p_cut=0.5):
    '''
    Objective function for binary classification with logistic regression
    (returns 1 - training accuracy, so lower is better)
    '''
def sigmoid(x):
        '''sigmoid activation function'''
return 1.0 / (1 + np.exp(-x))
def forward(X, W):
        '''forward pass (model expression)'''
return sigmoid(np.dot(X, W))
def XaddConst(X):
        '''append a constant column to X'''
const = np.ones((X.shape[0], 1))
return np.concatenate((X, const), axis=1)
W = np.array(W_b).reshape(-1, 1)
X_train, y_train = np.array(X_train), np.array(y_train)
    Xconst = XaddConst(X_train)  # append the constant (bias) column to X
    # reshape y to a 2-D column vector
if len(y_train.shape) == 1 or y_train.shape[1] == 1:
y_train = y_train.reshape(-1, 1)
y_pre_p = forward(Xconst, W)
y_pre = (y_pre_p >= p_cut).astype(int)
error = 1 - metrics.accuracy_score(y_train, y_pre)
return error
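# Minimal usage sketch (illustrative only, synthetic data): W_b packs the weights followed
# by the bias term, matching the constant column appended inside the objective.
# _X = np.array([[0.0, 0.0], [1.0, 1.0]])
# _y = np.array([0, 1])
# LR_cls_bin_objf([1.0, 1.0, -1.0], X_train=_X, y_train=_y)  # -> 0.0 (perfectly separated)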
def plot_result(data, w, b, title=None):
plt.figure(figsize=(10, 7))
data0 = data[data['y'] == 0]
data1 = data[data['y'] == 1]
plt.plot(data0['x1'], data0['x2'], 'ob', label='y=0')
plt.plot(data1['x1'], data1['x2'], 'or', label='y=1')
x = np.arange(data['x1'].min(), data['x1'].max(), 0.1)
y = (-b - w[0]*x) / w[1]
plt.plot(x, y, '-')
plt.legend(loc=0)
if title:
plt.title(title)
plt.show()
#%%
if __name__ == '__main__':
strt_tm = time.time()
#%%
    # dataset for the classification task
data =
|
pd.read_excel('../../datsci/test/test_data1.xlsx')
|
pandas.read_excel
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 2 16:00:36 2019
@author: Stefan
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from pandas.plotting import autocorrelation_plot
from math import sqrt
import datetime
import os
#### INNER FUNCTIONS #### (not to be called in main)
# create 3-D inputs for LSTM validation and train samples (default validation size: 3 weeks)
def input_for_LSTM(features, output, validation_size, test_size):
features = features.copy(deep=True)
output = output.copy(deep=True)
scaler = MinMaxScaler(copy=True, feature_range=(0, 1))
features = scaler.fit_transform(features)
features = pd.DataFrame(features)
X_train = features.iloc[:-validation_size,:]
X_val = features.iloc[-validation_size:-test_size,:]
y_train = output['pc'][:-validation_size]
y_val = output['pc'][-validation_size:-test_size]
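    # reshape to (samples, timesteps=1, features), the 3-D input shape Keras LSTM layers expect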
    X_train = X_train.values  # DataFrame.as_matrix() was removed from pandas; .values is the equivalent
    X_train = X_train.reshape(X_train.shape[0], 1, X_train.shape[1])
    X_val = X_val.values
    X_val = X_val.reshape(X_val.shape[0], 1, X_val.shape[1])
return X_train, X_val, y_train, y_val
# create 3-D test samples for the LSTM
def test_for_LSTM(features, output, test_size):
features = features.copy(deep=True)
output = output.copy(deep=True)
scaler = MinMaxScaler(copy=True, feature_range=(0, 1))
features = scaler.fit_transform(features)
features = pd.DataFrame(features)
X_test = features.iloc[-test_size:,:]
y_test = output['pc'][-test_size:]
print(type(X_test))
    X_test = X_test.values
    X_test = X_test.reshape(X_test.shape[0], 1, X_test.shape[1])
return X_test, y_test
# sqrt for mean squared error
def evaluation_rmse(y_test, y_pred):
result = mean_squared_error(y_test, y_pred)
result = sqrt(result)
return result
# split the data into train and test samples for the random forest regressor
def split_data(features, output, num_samples):
X_train = features.iloc[:-num_samples, :]
X_test = features.iloc[-num_samples:, :]
y_train = output['pc'].iloc[:-num_samples]
y_test = output['pc'].iloc[-num_samples:]
return X_train, X_test, y_train, y_test
#make prediction (default test size 1 week)
def model_predict(model, X_test, y_test):
y_predict = model.predict(X_test)
rmse = evaluation_rmse(y_test, y_predict)
return y_predict, int(rmse)
#draw the plot with real, predicted power and their error on the same graph
def drawPlot(y_test, y_pred, score, output, name, num):
error_arr = []
for i in range(len(y_pred)):
error = abs(y_test.iloc[i] - y_pred[i])
error_arr.append(error)
plt.figure()
plt.plot(output['stamp'][-num:],error_arr, label = 'error')
plt.plot(output['stamp'][-num:],y_test, label = 'test')
plt.plot(output['stamp'][-num:],y_pred, label = 'prediction')
plt.legend()
plt.suptitle('error over time {} acc = {}'.format(name, score))
plt.show()
#### AVAILABLE FUNCTIONS FOR MAIN ####
# which features have a direct correlation with the output
def feature_correlation(df, output):
print('feature correlation')
df = df.copy(deep=True)
df['pc'] = output['pc']
corr_matrix = df.corr()
print(corr_matrix["pc"].sort_values(ascending=False))
# check the correlation between the current and past values of 'pc'
def plotcorrelation(df, output, num_of_samples):
df = df.copy(deep=True)
df['pc'] = output['pc']
|
autocorrelation_plot(df.pc[:num_of_samples])
|
pandas.plotting.autocorrelation_plot
|