prompt | completion | api
---|---|---
stringlengths 19 to 1.03M | stringlengths 4 to 2.12k | stringlengths 8 to 90
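Each row below pairs a Python source prompt, truncated immediately before a pandas call, with that call as the completion and the fully qualified pandas API it exercises. A minimal sketch of one row's shape (field names come from the header above; the values are illustrative only, not drawn from the data):

row = {
    "prompt": "# ...Python source cut off right before a pandas call...",  # 19 chars to 1.03M chars
    "completion": "pd.read_csv(path)",                                      # 4 to 2.12k chars
    "api": "pandas.read_csv",                                               # 8 to 90 chars
}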
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_deprecate_reduce(self):
with warnings.catch_warnings(record=True):
x = []
self.empty.apply(x.append, axis=1, result_type='reduce')
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.frame.apply(np.mean, broadcast=True)
def test_apply_broadcast(self):
# scalars
result = self.frame.apply(np.mean, result_type='broadcast')
expected = DataFrame([self.frame.mean()], index=self.frame.index)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(np.mean, axis=1, result_type='broadcast')
m = self.frame.mean(axis=1)
expected = DataFrame({c: m for c in self.frame.columns})
tm.assert_frame_equal(result, expected)
# lists
result = self.frame.apply(
lambda x: list(range(len(self.frame.columns))),
axis=1,
result_type='broadcast')
m = list(range(len(self.frame.columns)))
expected = DataFrame([m] * len(self.frame.index),
dtype='float64',
index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: list(range(len(self.frame.index))),
result_type='broadcast')
m = list(range(len(self.frame.index)))
expected = DataFrame({c: m for c in self.frame.columns},
dtype='float64',
index=self.frame.index)
tm.assert_frame_equal(result, expected)
# preserve columns
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: [1, 2, 3],
axis=1,
result_type='broadcast')
tm.assert_frame_equal(result, df)
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: Series([1, 2, 3], index=list('abc')),
axis=1,
result_type='broadcast')
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_apply_broadcast_error(self):
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
# > 1 ndim
with pytest.raises(ValueError):
df.apply(lambda x: np.array([1, 2]).reshape(-1, 2),
axis=1,
result_type='broadcast')
# cannot broadcast
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2],
axis=1,
result_type='broadcast')
with pytest.raises(ValueError):
df.apply(lambda x: Series([1, 2]),
axis=1,
result_type='broadcast')
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = frame_apply(self.mixed_frame,
np.mean, 0,
ignore_failures=True).apply_standard()
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), result_type='broadcast')
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = Series(np.repeat(t[0], len(self.frame.columns))
for t in self.frame.itertuples())
expected.index = self.frame.index
assert_series_equal(result, expected)
def test_apply_multi_index(self):
index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['col1', 'col2'])
result = s.apply(
lambda x: Series({'min': min(x), 'max': max(x)}), 1)
expected = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['min', 'max'])
assert_frame_equal(result, expected, check_like=True)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, result_type='reduce')
reduce_false = df.apply(fn, result_type='expand')
reduce_none = df.apply(fn)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, self.frame * 2)
self.frame.applymap(type)
# gh-465: function returning tuples
result = self.frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# gh-2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# see gh-2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = pd.Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime', 'timedelta']:
assert result.loc[0, f] == str(df.loc[0, f])
# see gh-8222
empty_frames = [pd.DataFrame(),
pd.DataFrame(columns=list('ABC')),
pd.DataFrame(index=list('ABC')),
pd.DataFrame({'A': [], 'B': [], 'C': []})]
for frame in empty_frames:
for func in [round, lambda x: x]:
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
def test_applymap_box_timestamps(self):
# #2689, #2627
ser = pd.Series(date_range('1/1/2000', periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
pd.DataFrame(ser).applymap(func)
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')],
'c': [pd.Timedelta('1 days'),
pd.Timedelta('2 days')],
'd': [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]})
res = df.applymap(lambda x: '{0}'.format(x.__class__.__name__))
exp = pd.DataFrame({'a': ['Timestamp', 'Timestamp'],
'b': ['Timestamp', 'Timestamp'],
'c': ['Timedelta', 'Timedelta'],
'd': ['Period', 'Period']})
tm.assert_frame_equal(res, exp)
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
assert df.x1.dtype == 'M8[ns]'
def test_apply_non_numpy_dtype(self):
# See gh-12244
df = DataFrame({'dt': pd.date_range(
"2015-01-01", periods=3, tz='Europe/Brussels')})
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
result = df.apply(lambda x: x + pd.Timedelta('1day'))
expected = DataFrame({'dt': pd.date_range(
"2015-01-02", periods=3, tz='Europe/Brussels')})
assert_frame_equal(result, expected)
df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category')
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
def test_apply_dup_names_multi_agg(self):
# GH 21063
df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'a'])
expected = pd.DataFrame([[0, 1]], columns=['a', 'a'], index=['min'])
result = df.agg(['min'])
tm.assert_frame_equal(result, expected)
class TestInferOutputShape(object):
# the user has supplied an opaque UDF where
# they are transforming the input that requires
# us to infer the output
def test_infer_row_shape(self):
# gh-17437
# if row shape is changing, infer it
df = pd.DataFrame(np.random.rand(10, 2))
result = df.apply(np.fft.fft, axis=0)
assert result.shape == (10, 2)
result = df.apply(np.fft.rfft, axis=0)
assert result.shape == (6, 2)
def test_with_dictlike_columns(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
expected = Series([{'s': 3} for t in df.itertuples()])
assert_series_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
assert_series_equal(result, expected)
# compose a series
result = (df['a'] + df['b']).apply(lambda x: {'s': x})
expected = Series([{'s': 3}, {'s': 3}])
assert_series_equal(result, expected)
# gh-18775
df = DataFrame()
df["author"] = ["X", "Y", "Z"]
df["publisher"] = ["BBC", "NBC", "N24"]
df["date"] = pd.to_datetime(['17-10-2010 07:15:30',
'13-05-2011 08:20:35',
'15-01-2013 09:09:09'])
result = df.apply(lambda x: {}, axis=1)
expected = Series([{}, {}, {}])
assert_series_equal(result, expected)
def test_with_dictlike_columns_with_infer(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
expected = DataFrame({'s': [3, 3]})
assert_frame_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
assert_frame_equal(result, expected)
def test_with_listlike_columns(self):
# gh-17348
df = DataFrame({'a': Series(np.random.randn(4)),
'b': ['a', 'list', 'of', 'words'],
'ts': date_range('2016-10-01', periods=4, freq='H')})
result = df[['a', 'b']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'b']].itertuples()])
assert_series_equal(result, expected)
result = df[['a', 'ts']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'ts']].itertuples()])
assert_series_equal(result, expected)
# gh-18919
df = DataFrame({'x': Series([['a', 'b'], ['q']]),
'y': Series([['z'], ['q', 't']])})
df.index = MultiIndex.from_tuples([('i0', 'j0'), ('i1', 'j1')])
result = df.apply(
lambda row: [el for el in row['x'] if el in row['y']],
axis=1)
expected = Series([[], ['q']], index=df.index)
assert_series_equal(result, expected)
def test_infer_output_shape_columns(self):
# gh-18573
df = DataFrame({'number': [1., 2.],
'string': ['foo', 'bar'],
'datetime': [pd.Timestamp('2017-11-29 03:30:00'),
pd.Timestamp('2017-11-29 03:45:00')]})
result = df.apply(lambda row: (row.number, row.string), axis=1)
expected = Series([(t.number, t.string) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_infer_output_shape_listlike_columns(self):
# gh-16353
df = DataFrame(np.random.randn(6, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
# gh-17970
df = DataFrame({"a": [1, 2, 3]}, index=list('abc'))
result = df.apply(lambda row: np.ones(1), axis=1)
expected = Series([np.ones(1) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
result = df.apply(lambda row: np.ones(2), axis=1)
expected = Series([np.ones(2) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
# gh-17892
df = pd.DataFrame({'a': [pd.Timestamp('2010-02-01'),
pd.Timestamp('2010-02-04'),
pd.Timestamp('2010-02-05'),
pd.Timestamp('2010-02-06')],
'b': [9, 5, 4, 3],
'c': [5, 3, 4, 2],
'd': [1, 2, 3, 4]})
def fun(x):
return (1, 2)
result = df.apply(fun, axis=1)
expected = Series([(1, 2) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_coerce_for_shapes(self):
# we want column names to NOT be propagated
# just because the shape matches the input shape
df = DataFrame(np.random.randn(4, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_names(self):
# if a Series is returned, we should use the resulting index names
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: Series([1, 2, 3],
index=['test', 'other', 'cols']),
axis=1)
expected = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other', 'cols'])
assert_frame_equal(result, expected)
result = df.apply(
lambda x: pd.Series([1, 2], index=['test', 'other']), axis=1)
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other'])
assert_frame_equal(result, expected)
def test_result_type(self):
# result_type should be consistent no matter which
# path we take in the code
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='expand')
expected = df.copy()
expected.columns = [0, 1, 2]
assert_frame_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
expected = df[['A', 'B']].copy()
expected.columns = [0, 1]
assert_frame_equal(result, expected)
# broadcast result
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3],
index=columns),
axis=1,
result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
# series result
result = df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1)
expected = df.copy()
assert_frame_equal(result, expected)
# series result with other index
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3], index=columns),
axis=1)
expected = df.copy()
expected.columns = columns
assert_frame_equal(result, expected)
@pytest.mark.parametrize("result_type", ['foo', 1])
def test_result_type_error(self, result_type):
# allowed result_type
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2, 3],
axis=1,
result_type=result_type)
@pytest.mark.parametrize(
"box",
[lambda x: list(x),
lambda x: tuple(x),
lambda x: np.array(x, dtype='int64')],
ids=['list', 'tuple', 'array'])
def test_consistency_for_boxed(self, box):
# passing an array or list should not affect the output shape
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: box([1, 2]), axis=1)
expected = Series([box([1, 2]) for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: box([1, 2]), axis=1, result_type='expand')
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1)
assert_frame_equal(result, expected)
def zip_frames(frames, axis=1):
"""
take a list of frames, zip them together under the
assumption that these all have the first frames' index/columns.
Returns
-------
new_frame : DataFrame
"""
if axis == 1:
columns = frames[0].columns
zipped = [f.loc[:, c] for c in columns for f in frames]
return pd.concat(zipped, axis=1)
else:
index = frames[0].index
zipped = [f.loc[i, :] for i in index for f in frames]
return pd.DataFrame(zipped)
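# Hedged usage sketch of zip_frames (illustrative only, not part of the original
# test module). With axis=1 the columns of the inputs are interleaved column by
# column:
#
#   df1 = pd.DataFrame([[1, 2]], columns=['A', 'B'])
#   df2 = pd.DataFrame([[3, 4]], columns=['A', 'B'])
#   zip_frames([df1, df2], axis=1)
#   # -> columns appear in the order A (from df1), A (from df2), B (df1), B (df2)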
class TestDataFrameAggregate(TestData):
def test_agg_transform(self, axis):
other_axis = 1 if axis in {0, 'index'} else 0
with np.errstate(all='ignore'):
f_abs = np.abs(self.frame)
f_sqrt = np.sqrt(self.frame)
# ufunc
result = self.frame.transform(np.sqrt, axis=axis)
expected = f_sqrt.copy()
assert_frame_equal(result, expected)
result = self.frame.apply(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
result = self.frame.transform(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
# list-like
result = self.frame.apply([np.sqrt], axis=axis)
expected = f_sqrt.copy()
if axis in {0, 'index'}:
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['sqrt']])
else:
expected.index = pd.MultiIndex.from_product(
[self.frame.index, ['sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.sqrt], axis=axis)
assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both
# functions per series and then concatting
result = self.frame.apply([np.abs, np.sqrt], axis=axis)
expected = zip_frames([f_abs, f_sqrt], axis=other_axis)
if axis in {0, 'index'}:
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['absolute', 'sqrt']])
else:
expected.index = pd.MultiIndex.from_product(
[self.frame.index, ['absolute', 'sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.abs, 'sqrt'], axis=axis)
assert_frame_equal(result, expected)
def test_transform_and_agg_err(self, axis):
# cannot both transform and agg
def f():
self.frame.transform(['max', 'min'], axis=axis)
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.agg(['max', 'sqrt'], axis=axis)
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.transform(['max', 'sqrt'], axis=axis)
pytest.raises(ValueError, f)
df = pd.DataFrame({'A': range(5), 'B': 5})
def f():
with np.errstate(all='ignore'):
df.agg({'A': ['abs', 'sum'], 'B': ['mean', 'max']}, axis=axis)
@pytest.mark.parametrize('method', [
'abs', 'shift', 'pct_change', 'cumsum', 'rank',
])
def test_transform_method_name(self, method):
# https://github.com/pandas-dev/pandas/issues/19760
df = pd.DataFrame({"A": [-1, 2]})
result = df.transform(method)
expected = operator.methodcaller(method)(df)
tm.assert_frame_equal(result, expected)
def test_demo(self):
# demonstration tests
df = pd.DataFrame({'A': range(5), 'B': 5})
result = df.agg(['min', 'max'])
expected = DataFrame({'A': [0, 4], 'B': [5, 5]},
columns=['A', 'B'],
index=['min', 'max'])
tm.assert_frame_equal(result, expected)
result = df.agg({'A': ['min', 'max'], 'B': ['sum', 'max']})
expected = DataFrame({'A': [4.0, 0.0, np.nan],
'B': [5.0, np.nan, 25.0]},
columns=['A', 'B'],
index=['max', 'min', 'sum'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
def test_agg_multiple_mixed_no_warning(self):
# https://github.com/pandas-dev/pandas/issues/20909
mdf = pd.DataFrame({'A': [1, 2, 3],
'B': [1., 2., 3.],
'C': ['foo', 'bar', 'baz'],
'D': pd.date_range('20130101', periods=3)})
expected = pd.DataFrame({"A": [1, 6], 'B': [1.0, 6.0],
"C": ['bar', 'foobarbaz'],
"D": [pd.Timestamp('2013-01-01'), pd.NaT]},
index=['min', 'sum'])
# sorted index
with tm.assert_produces_warning(None):
result = mdf.agg(['min', 'sum'])
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(None):
result = mdf[['D', 'C', 'B', 'A']].agg(['sum', 'min'])
# For backwards compatibility, the result's index is
# still sorted by function name, so it's ['min', 'sum']
# not ['sum', 'min'].
expected = expected[['D', 'C', 'B', 'A']]
tm.assert_frame_equal(result, expected)
def test_agg_dict_nested_renaming_depr(self):
df = pd.DataFrame({'A': range(5), 'B': 5})
# nested renaming
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df.agg({'A': {'foo': 'min'},
'B': {'bar': 'max'}})
def test_agg_reduce(self, axis):
other_axis = 1 if axis in {0, 'index'} else 0
name1, name2 = self.frame.axes[other_axis].unique()[:2].sort_values()
# all reducers
expected = pd.concat([self.frame.mean(axis=axis),
self.frame.max(axis=axis),
self.frame.sum(axis=axis),
], axis=1)
expected.columns = ['mean', 'max', 'sum']
expected = expected.T if axis in {0, 'index'} else expected
result = self.frame.agg(['mean', 'max', 'sum'], axis=axis)
assert_frame_equal(result, expected)
# dict input with scalars
func = OrderedDict([(name1, 'mean'), (name2, 'sum')])
result = self.frame.agg(func, axis=axis)
expected = Series([self.frame.loc(other_axis)[name1].mean(),
self.frame.loc(other_axis)[name2].sum()],
index=[name1, name2])
assert_series_equal(result, expected)
# dict input with lists
func = OrderedDict([(name1, ['mean']), (name2, ['sum'])])
result = self.frame.agg(func, axis=axis)
expected = DataFrame({
name1: Series([self.frame.loc(other_axis)[name1].mean()],
index=['mean']),
name2: Series([self.frame.loc(other_axis)[name2].sum()],
index=['sum'])})
expected = expected.T if axis in {1, 'columns'} else expected
assert_frame_equal(result, expected)
# dict input with lists with multiple
func = OrderedDict([(name1, ['mean', 'sum']), (name2, ['sum', 'max'])])
result = self.frame.agg(func, axis=axis)
expected = DataFrame(OrderedDict([
(name1, Series([self.frame.loc(other_axis)[name1].mean(),
self.frame.loc(other_axis)[name1].sum()],
index=['mean', 'sum'])),
(name2, Series([self.frame.loc(other_axis)[name2].sum(),
self.frame.loc(other_axis)[name2].max()],
index=['sum', 'max'])),
]))
expected = expected.T if axis in {1, 'columns'} else expected
assert_frame_equal(result, expected)
def test_nuisance_columns(self):
# GH 15015
df = DataFrame({'A': [1, 2, 3],
'B': [1., 2., 3.],
'C': ['foo', 'bar', 'baz'],
'D': pd.date_range('20130101', periods=3)})
result = df.agg('min')
expected = Series([1, 1., 'bar', pd.Timestamp('20130101')],
index=df.columns)
assert_series_equal(result, expected)
result = df.agg(['min'])
expected = DataFrame([[1, 1., 'bar', pd.Timestamp('20130101')]],
index=['min'], columns=df.columns)
assert_frame_equal(result, expected)
result = df.agg('sum')
expected = Series([6, 6., 'foobarbaz'],
index=['A', 'B', 'C'])
assert_series_equal(result, expected)
result = df.agg(['sum'])
expected = DataFrame([[6, 6., 'foobarbaz']],
index=['sum'], columns=['A', 'B', 'C'])
assert_frame_equal(result, expected)
def test_non_callable_aggregates(self):
# GH 16405
# 'size' is a property of frame/series
# validate that this is working
df = DataFrame({'A': [None, 2, 3],
'B': [1.0, np.nan, 3.0],
'C': ['foo', None, 'bar']})
# Function aggregate
result = df.agg({'A': 'count'})
expected = Series({'A': 2})
assert_series_equal(result, expected)
# Non-function aggregate
result = df.agg({'A': 'size'})
expected = Series({'A': 3})
assert_series_equal(result, expected)
# Mix function and non-function aggs
result1 = df.agg(['count', 'size'])
result2 = df.agg({'A': ['count', 'size'],
'B': ['count', 'size'],
'C': ['count', 'size']})
expected = pd.DataFrame({'A': {'count': 2, 'size': 3},
'B': {'count': 2, 'size': 3},
'C': {'count': 2, 'size': 3}})
assert_frame_equal(result1, result2, check_like=True)
assert_frame_equal(result2, expected, check_like=True)
# Just functional string arg is same as calling df.arg()
result = df.agg('count')
expected = df.count()
assert_series_equal(result, expected)
# Just a string attribute arg same as calling df.arg
result = df.agg('size')
expected = df.size
assert result == expected
@pytest.mark.parametrize("df, func, expected", chain(
_get_cython_table_params(
DataFrame(), [
('sum', Series()),
('max', Series()),
('min', Series()),
('all', Series(dtype=bool)),
('any', Series(dtype=bool)),
('mean', Series()),
('prod', Series()),
('std', Series()),
('var', Series()),  # api: pandas.Series
"""Functions for plotting sipper data."""
from collections import defaultdict
import datetime
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns
from sipper import SipperError
#---dates and shading
def convert_dt64_to_dt(dt64):
"""Converts numpy datetime to standard datetime (needed for shade_darkness
function in most cases)."""
new_date = ((dt64 - np.datetime64('1970-01-01T00:00:00'))/
np.timedelta64(1, 's'))
new_date = datetime.datetime.utcfromtimestamp(new_date)
return new_date
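# Hedged usage sketch (illustrative only, not part of the original module):
#   convert_dt64_to_dt(np.datetime64('2020-01-01T12:30'))
#   # -> datetime.datetime(2020, 1, 1, 12, 30)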
def hours_between(start, end, convert=True):
"""
Create a range of hours between two dates.
Parameters
----------
start, end : datetime-like object
When to begin and end the data range
convert : bool, optional
Whether to convert the start/end arguments from numpy datetime to
standard datetime. The default is True.
Returns
-------
pandas DateTimeIndex
Index array of all hours between start and end.
"""
if convert:
start = convert_dt64_to_dt(start)
end = convert_dt64_to_dt(end)
rounded_start = datetime.datetime(year=start.year,
month=start.month,
day=start.day,
hour=start.hour)
rounded_end = datetime.datetime(year=end.year,
month=end.month,
day=end.day,
hour=end.hour)
return pd.date_range(rounded_start, rounded_end, freq='1H')  # api: pandas.date_range
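# Hedged usage sketch (illustrative only, not part of the original module):
# both endpoints are floored to the hour, so
#   hours_between(np.datetime64('2020-01-01T05:20'), np.datetime64('2020-01-01T08:40'))
#   # -> DatetimeIndex of 05:00, 06:00, 07:00 and 08:00 on 2020-01-01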
import pandas as pd
from functools import reduce
import re
import os
import numpy as np
import requests
PATH = os.path.expanduser("~/Dropbox/MPCounterfactual/src/economic_data")
GR_PATH = os.path.expanduser("~/Dropbox/MPCounterfactual/src/collection/python/output")
# =============================================================================
### daily ###
dfedtar = pd.read_csv(f"{PATH}/raw_data/DFEDTAR.csv")
dfedtarl = pd.read_csv(f"{PATH}/raw_data/DFEDTARL.csv")
dfedtaru = pd.read_csv(f"{PATH}/raw_data//DFEDTARU.csv")
dff = pd.read_csv(f"{PATH}/raw_data//DFF.csv")
# =============================================================================
### monthly ###
def m_import(path):
data = pd.read_csv(path)
data["DATE"] = pd.to_datetime(data["DATE"])
data["year"] = data["DATE"].dt.year
data["month"] = data["DATE"].dt.month
data = data[data["year"]>1980].copy()
data.drop(columns=["DATE"],inplace=True)
data = data.set_index(["year","month"])
return data
# Industrial Production Index
indpro = m_import(f"{PATH}/raw_data/INDPRO.csv")
# Personal Consumption Expenditures: Chain-type Price Index (PCEPI)
pcepi = m_import(f"{PATH}/raw_data/PCEPI.csv")
# Unemployment Rate (UNRATE)
unrate = m_import(f"{PATH}/raw_data/UNRATE.csv")
#merge
data_m = pd.concat([indpro,pcepi,unrate],axis=1)
data_m = data_m.reset_index()
data_m["UNRATE"] = data_m["UNRATE"] / 100
data_m["d_UNRATE"] = data_m["UNRATE"].diff(periods=1)
data_m["l1d_UNRATE"] = data_m["d_UNRATE"].shift(periods=1)
data_m["l2d_UNRATE"] = data_m["d_UNRATE"].shift(periods=2)
data_m["ln_INDPRO"] = np.log(data_m["INDPRO"])
data_m["dln_INDPRO"] = data_m["ln_INDPRO"].diff(periods=1)
data_m["l1dln_INDPRO"] = data_m["dln_INDPRO"].shift(periods=1)
data_m["l2dln_INDPRO"] = data_m["dln_INDPRO"].shift(periods=2)
data_m["ln_PCEPI"] = np.log(data_m["PCEPI"])
data_m["dln_PCEPI"] = data_m["ln_PCEPI"].diff(periods=1)
data_m["l1dln_PCEPI"] = data_m["dln_PCEPI"].shift(periods=1)
data_m["l2dln_PCEPI"] = data_m["dln_PCEPI"].shift(periods=2)
# =============================================================================
### Greenbook data ###
from datequarter import DateQuarter
gr_data = pd.read_csv(f"{GR_PATH}/greenbook_data.csv")
gr_data["meeting_date"] = pd.to_datetime(gr_data["meeting_date"],format='%Y%m%d')
gr_data["meeting_year"] = gr_data["meeting_date"].dt.year
gr_data["meeting_quarter"] = gr_data["meeting_date"].dt.quarter
gr_data["meeting_quarterly"] = gr_data["meeting_date"].apply(lambda x: DateQuarter(x.year,x.quarter))
gr_data["forecast_date"] = gr_data["forecast_date"].apply(lambda x: f'{str(x).split(".")[0]}-{int(str(x).split(".")[1])*3:02d}-01')
gr_data["forecast_date"] = pd.to_datetime(gr_data["forecast_date"],format='%Y-%m-%d')
gr_data["forecast_year"] = gr_data["forecast_date"].dt.year
gr_data["forecast_quarter"] = gr_data["forecast_date"].dt.quarter
gr_data["relforecast_quarter"] = gr_data[["meeting_year","meeting_quarter","forecast_year","forecast_quarter"]].apply(
lambda x: DateQuarter(x["meeting_year"],x["meeting_quarter"]) - DateQuarter(x["forecast_year"],x["forecast_quarter"]) , axis=1)
variables = list(gr_data["macro_variable"].unique())
meeting_dates = list(gr_data["meeting_date"].unique())
table = gr_data.pivot_table(index=["meeting_date"],values="projection",columns=["macro_variable",'relforecast_quarter'])
newcols = [ f'{col[0]}_{str(col[1]).replace("-","l")}' for col in table.columns]
table.columns = newcols
# replace l1 with the quarter-0 value if l1 is missing
for var in variables:
table.loc[table[f"{var}_l1"].isna(),f"{var}_l1"] = table.loc[table[f"{var}_l1"].isna(),f"{var}_0"]
table.to_pickle("final_data/greenbook_data.pkl")
table.to_stata("final_data/greenbook_data.dta",convert_dates={"meeting_date":"td"})
### Add financial indicators ###
# Treasury Yields
# Download Data from here:
# =============================================================================
# url = "https://www.federalreserve.gov/data/yield-curve-tables/feds200628.csv"
# r = requests.get(url)
# r.status_code
# with open(f'{PATH}/raw_data/fed_zerobondyields.csv', 'wb') as f:
# f.write(r.content)
# # Pre-process the data
# =============================================================================
start_date = "1971-11-01"
end_date = "2020-9-30"
currentyear = end_date
df = pd.read_csv(f'{PATH}/raw_data/fed_zerobondyields.csv',skiprows=9)
df["date"] = pd.to_datetime(df["Date"])
df = df[(df["date"]>=start_date) & (df["date"]<=end_date) ]
df = df[~df["BETA0"].isna()]
df["month"] = df["date"].dt.month
df["year"] = df["date"].dt.year
df["day"] = df["date"].dt.day
eom = df.groupby(["year","month"])["date"].max().reset_index().rename(columns={"date":"eom"})
df = df.merge(eom,on=["year","month"],how="inner")
data = df.copy()
data = data[["date","year","month"]+["SVENY{:02.0f}".format(x) for x in range(1,11)]+["day"]]
# Express in percentage
for mat in range(1,11):
data["SVENY{:02.0f}".format(mat)] = data["SVENY{:02.0f}".format(mat)] / 100
treasury_df = data.copy()
treasury_df = treasury_df.set_index("date")
del data
del df
# Credit Spreads
mood_aaa = pd.read_csv(f"{PATH}/raw_data/AAA10Y.csv")
mood_aaa["DATE"] = pd.to_datetime(mood_aaa["DATE"])
mood_aaa[mood_aaa["AAA10Y"]=="."] = np.nan
mood_aaa.fillna(method="ffill",inplace=True)
mood_aaa["AAA10Y"] = pd.to_numeric(mood_aaa["AAA10Y"])
mood_aaa["AAA10Y"] = mood_aaa["AAA10Y"].apply(lambda x: x/100)
mood_aaa = mood_aaa.drop_duplicates(subset="DATE",keep="first")
mood_aaa = mood_aaa.set_index(["DATE"])
mood_baa = pd.read_csv(f"{PATH}/raw_data/BAA10Y.csv")
mood_baa["DATE"] = pd.to_datetime(mood_baa["DATE"])
mood_baa[mood_baa["BAA10Y"]=="."] = np.nan
mood_baa.fillna(method="ffill",inplace=True)
mood_baa["BAA10Y"] = pd.to_numeric(mood_baa["BAA10Y"])
mood_baa["BAA10Y"] = mood_baa["BAA10Y"] / 100
mood_baa = mood_baa.drop_duplicates(subset="DATE",keep="first")
mood_baa = mood_baa.set_index(["DATE"])
cd_df = pd.concat([mood_aaa,mood_baa],axis=1)
# Equity Returns
sandp500 = pd.read_csv(f"{PATH}/raw_data/sandp500.csv")
sandp500["date"] = pd.to_datetime(sandp500["caldt"],format='%Y%m%d')
sandp500.drop(columns = ["caldt"] ,inplace=True)
sandp500 = sandp500.set_index("date")
marketreturns = pd.read_csv(f"{PATH}/raw_data/marketreturns.csv")
marketreturns["date"] = pd.to_datetime(marketreturns["caldt"],format='%Y%m%d')
marketreturns.drop(columns = ["caldt"] ,inplace=True)
marketreturns = marketreturns.set_index("date")
# Liquidity - TED Spread
tedspread = pd.read_csv(f"{PATH}/raw_data/TEDRATE.csv")
tedspread["DATE"] = pd.to_datetime(tedspread["DATE"])
tedspread[tedspread["TEDRATE"]=="."] = np.nan
tedspread.fillna(method="ffill",inplace=True)
tedspread["TEDRATE"] = pd.to_numeric(tedspread["TEDRATE"])
tedspread["TEDRATE"] = tedspread["TEDRATE"] / 100
tedspread = tedspread.drop_duplicates(subset="DATE",keep="first")
tedspread = tedspread.set_index(["DATE"])
# Daily Market Data
market_d = pd.concat([treasury_df, cd_df, sandp500, marketreturns, tedspread], axis=1)  # api: pandas.concat
from __future__ import division
import datetime
import random
from datetime import date
from random import randint
import pandas as pd
from faker import Faker
from source.utils.data_generator_module import profile_weights
def get_user_input(customer_data_path="./data/customers.csv", profile_path="./profiles/adults_2550_female_rural.json",
start_date="1-1-2012", end_date="1-31-2012", file_path="./data/adults_2550_female_rural.csv"):
# convert date to datetime object
def convert_date(d):
for char in ['/', '-', '_', ' ']:
if char in d:
d = d.split(char)
return date(int(d[2]), int(d[0]), int(d[1]))
customers = pd.read_csv(customer_data_path)  # api: pandas.read_csv
import pandas as pd
import numpy as np
import altair as alt
import altair_saver
import glob
import os
import copy
import collections
import traceback
import json
# ---------------- Plot themes ------------------------
def personal():
return {
'config': {
'font': 'sans-serif',
'view': {
'height': 300,
'width': 400,
},
'range': {
'category': {'scheme': 'set2'},
'ordinal': {'scheme': 'plasma'},
},
'legend': {
'labelLimit': 0,
},
'background': 'white',
'mark': {
'clip': True,
},
'line': {
'size': 3,
# 'opacity': 0.4
},
}
}
def publication():
stroke_color = '333'
title_size = 24
label_size = 20
line_width = 5
return {
'config': {
'font': 'sans-serif',
'view': {
'height': 500,
'width': 600,
'strokeWidth': 0,
'background': 'white',
},
'title': {
'fontSize': title_size,
},
'range': {
'category': {'scheme': 'set2'},
'ordinal': {'scheme': 'plasma'},
},
'axis': {
'titleFontSize': title_size,
'labelFontSize': label_size,
'grid': False,
'domainWidth': 5,
'domainColor': stroke_color,
'tickWidth': 3,
'tickSize': 9,
'tickCount': 4,
'tickColor': stroke_color,
'tickOffset': 0,
},
'legend': {
'titleFontSize': title_size,
'labelFontSize': label_size,
'labelLimit': 0,
'titleLimit': 0,
'orient': 'top-left',
# 'padding': 10,
'titlePadding': 10,
# 'rowPadding': 5,
'fillColor': '#ffffff88',
# 'strokeColor': 'black',
'cornerRadius': 0,
},
'rule': {
'size': 3,
'color': '999',
# 'strokeDash': [4, 4],
},
'line': {
'size': line_width,
# 'opacity': 0.4
},
}
}
alt.themes.register('personal', personal)
alt.themes.register('publication', publication)
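# Hedged usage note (illustrative, not in the original file): a registered theme
# is switched on with alt.themes.enable, e.g.
#   alt.themes.enable('publication')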
# ----------- Data loading -----------------------------
def load_args(path):
with open(path + '/args.json') as f:
args = json.load(f)
return args
def merge_args(df, args_dict):
df = df.copy()
for k, v in args_dict.items():
df[k] = v
return df
def load_jobs(pattern, subdir='exploration', root='.', title=None):
jobs = glob.glob(f'{root}/results/{subdir}/{pattern}')
results = []
for job in jobs:
try:
name = os.path.basename(os.path.normpath(job))
train_data = pd.read_csv(job + '/train.csv')
train_data['test'] = False
test_data = pd.read_csv(job + '/test.csv')
test_data['test'] = True
data = pd.concat([train_data, test_data], sort=False)
data['name'] = name
args_dict = load_args(job)
data = merge_args(data, args_dict)
results.append(data)
except Exception as e:
print(e)
df = pd.concat(results, sort=False)
if title is None:
df['title'] = df['name'].str.replace(r'_seed\d', '')
else:
df['title'] = title
return df.reset_index(drop=True)
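# Hedged usage sketch (the glob pattern and subdir are hypothetical, not taken
# from the original code):
#   df = load_jobs('sac_*', subdir='exploration', root='.')
#   # -> one tidy DataFrame of train/test rows plus 'name', 'title' and the args.json fields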
def load_sac_results(env, task, title='SAC'):
sac_results = pd.read_csv('results/sac.csv')  # api: pandas.read_csv
#! /usr/bin/env python
import os
import tempfile
import shutil
import warnings
warnings.filterwarnings("ignore")
from unittest import TestCase
from pandashells.lib import plot_lib, arg_lib
import argparse
from mock import patch, MagicMock
import matplotlib as mpl
import pylab as pl
import pandas as pd
from dateutil.parser import parse
warnings.resetwarnings()
class PlotLibTests(TestCase):
def setUp(self):
pl.plot(range(10))
self.dir_name = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.dir_name)
pl.clf()
@patch('pandashells.lib.plot_lib.pl.show')
def test_show_calls_pylab_show(self, show_mock):
"""show() call pylab.show()
"""
args = MagicMock(savefig=[])
plot_lib.show(args)
self.assertTrue(show_mock.called)
def test_show_creates_png_file(self):
"""show() saves a png file
"""
file_name = os.path.join(self.dir_name, 'plot.png')
args = MagicMock(savefig=[file_name])
plot_lib.show(args)
self.assertTrue(os.path.isfile(file_name))
def test_show_creates_html_file(self):
"""show() saves a png file
"""
file_name = os.path.join(self.dir_name, 'plot.html')
args = MagicMock(savefig=[file_name])
xlabel = 'my_xlabel_string'
pl.xlabel(xlabel)
plot_lib.show(args)
with open(file_name) as f:
self.assertTrue(xlabel in f.read())
def test_set_plot_styling(self):
"""set_plot_styling() alters mpl.rcParams
"""
args = MagicMock(
plot_context=['talk'],
plot_theme=['darkgrid'],
plot_palette=['muted'],
)
mpl.rcParams['axes.labelsize'] = 1
mpl.rcParams['axes.titlesize'] = 1
rc_pre = dict(mpl.rcParams)
plot_lib.set_plot_styling(args)
rc_post = dict(mpl.rcParams)
self.assertNotEqual(
rc_pre['axes.labelsize'], rc_post['axes.labelsize'])
self.assertNotEqual(
rc_pre['axes.titlesize'], rc_post['axes.titlesize'])
def test_set_plot_limits_no_args(self):
"""set_limits() properly does nothing when nothing specified
"""
args = MagicMock(savefig='', xlim=[], ylim=[])
plot_lib.set_limits(args)
self.assertEqual(pl.gca().get_xlim(), (0.0, 9.0))
self.assertEqual(pl.gca().get_ylim(), (0.0, 9.0))
def test_set_plot_limits(self):
"""set_limits() properly sets limits
"""
args = MagicMock(savefig='', xlim=[-2, 2], ylim=[-3, 3])
plot_lib.set_limits(args)
self.assertEqual(pl.gca().get_xlim(), (-2.0, 2.0))
self.assertEqual(pl.gca().get_ylim(), (-3.0, 3.0))
def test_set_log_scale(self):
args = MagicMock(savefig='', xlog=True, ylog=True)
plot_lib.set_scale(args)
self.assertEqual(pl.gca().get_xscale(), 'log')
self.assertEqual(pl.gca().get_yscale(), 'log')
def test_keep_lin_scale(self):
args = MagicMock(savefig='', xlog=False, ylog=False)
plot_lib.set_scale(args)
self.assertEqual(pl.gca().get_xscale(), 'linear')
self.assertEqual(pl.gca().get_yscale(), 'linear')
def test_set_labels_titles_no_args(self):
"""set_labels_title() properly does nothing when nothing specified
"""
args = MagicMock(savefig='', title=[], xlabel=[], ylabel=[])
plot_lib.set_labels_title(args)
self.assertEqual(pl.gca().get_title(), '')
self.assertEqual(pl.gca().get_xlabel(), '')
self.assertEqual(pl.gca().get_ylabel(), '')
def test_set_labels_titles(self):
"""set_labels_title() properly sets labels and titles
"""
args = MagicMock(savefig='', title=['t'], xlabel=['x'], ylabel=['y'])
plot_lib.set_labels_title(args)
self.assertEqual(pl.gca().get_title(), 't')
self.assertEqual(pl.gca().get_xlabel(), 'x')
self.assertEqual(pl.gca().get_ylabel(), 'y')
@patch('pandashells.lib.plot_lib.pl.legend')
def test_set_legend_no_args(self, legend_mock):
"""set_legend() properly does nothing when nothing specified
"""
args = MagicMock(savefig='', legend=[])
plot_lib.set_legend(args)
self.assertFalse(legend_mock.called)
@patch('pandashells.lib.plot_lib.pl.legend')
def test_set_legend_best(self, legend_mock):
"""set_legend() properly calls legend when specified
"""
args = MagicMock(savefig='', legend=['best'])
plot_lib.set_legend(args)
legend_mock.assert_called_with(loc='best')
@patch('pandashells.lib.plot_lib.pl.legend')
def test_set_legend_int(self, legend_mock):
"""set_legend() properly calls legend when specified
"""
args = MagicMock(savefig='', legend=['3'])
plot_lib.set_legend(args)
legend_mock.assert_called_with(loc=3)
def test_set_grid_no_grid(self):
"""set_grid() properly does nothing when no_grid set
"""
args = MagicMock(savefig='', no_grid=True)
plot_lib.set_grid(args)
self.assertFalse(pl.gca().xaxis._gridOnMajor)
def test_set_grid_with_grid(self):
"""set_grid() properly sets grid when specified
"""
args = MagicMock(savefig='', no_grid=False)
plot_lib.set_grid(args)
self.assertTrue(pl.gca().xaxis._gridOnMajor)
@patch('pandashells.lib.plot_lib.sys.stderr')
@patch('pandashells.lib.plot_lib.sys.exit')
def test_ensure_xy_args_bad(self, exit_mock, stderr_mock):
"""ensure_xy_args() exits when args are bad
"""
stderr_mock.write = MagicMock()
args = MagicMock(x=None, y=True)
plot_lib.ensure_xy_args(args)
self.assertTrue(exit_mock.called)
@patch('pandashells.lib.plot_lib.sys.stderr')
@patch('pandashells.lib.plot_lib.sys.exit')
def test_ensure_xy_args_good(self, exit_mock, stderr_mock):
"""ensure_xy_args() doesn't exit when args okay
"""
stderr_mock.write = MagicMock()
args = MagicMock(x=None, y=None)
plot_lib.ensure_xy_args(args)
self.assertFalse(exit_mock.called)
@patch('pandashells.lib.plot_lib.sys.stderr')
@patch('pandashells.lib.plot_lib.sys.exit')
def test_ensure_xy_omission_state_bad(self, exit_mock, stderr_mock):
"""ensure_xy_omission_state() identifies bad inputs
"""
stderr_mock.write = MagicMock()
args = MagicMock(x=None, y=None)
df = MagicMock(columns=[1, 2, 3])
plot_lib.ensure_xy_omission_state(args, df)
self.assertTrue(exit_mock.called)
@patch('pandashells.lib.plot_lib.sys.stderr')
@patch('pandashells.lib.plot_lib.sys.exit')
def test_ensure_xy_omission_state_good(self, exit_mock, stderr_mock):
"""ensure_xy_omission_state() identifies bad inputs
"""
stderr_mock.write = MagicMock()
args = MagicMock(x=None, y=None)
df = MagicMock(columns=[1, 2])
plot_lib.ensure_xy_omission_state(args, df)
self.assertFalse(exit_mock.called)
def test_autofill_plot_fields_and_labels_do_nothing(self):
"""autofill_plot_fields_and_labels does no filling
"""
args = MagicMock(x=None, xlabel='xpre', ylabel='ypre')
df = MagicMock(columns=[1])
plot_lib.autofill_plot_fields_and_labels(args, df)
self.assertEqual(args.xlabel, 'xpre')
self.assertEqual(args.ylabel, 'ypre')
def test_autofill_plot_fields_and_labels_2_cols(self):
"""autofill_plot_labels() appropriately handles 2 column frame
"""
args = MagicMock(x=None, xlabel=None, ylabel=None)
df = MagicMock(columns=['x', 'y'])
plot_lib.autofill_plot_fields_and_labels(args, df)
self.assertEqual(args.x, ['x'])
self.assertEqual(args.y, ['y'])
self.assertEqual(args.xlabel, ['x'])
self.assertEqual(args.ylabel, ['y'])
def test_str_to_date_float(self):
x = pd.Series([1., 2., 3.])
self.assertEqual(list(x), list(plot_lib.str_to_date(x)))
def test_str_to_date_str(self):
x = pd.Series(['1/1/2014', '1/2/2014', '1/3/2014'])  # api: pandas.Series
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data = pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
df = from_pandas_df(data)
df = df.rechunk((2, 2)).rechunk({1: 3})
res = df.execute().fetch()
pd.testing.assert_frame_equal(data, res)
def test_series_map_execution(setup):
raw = pd.Series(np.arange(10))
s = from_pandas_series(raw, chunk_size=7)
with pytest.raises(ValueError):
# cannot infer dtype, the inferred is int,
# but actually it is float
# just due to nan
s.map({5: 10})
r = s.map({5: 10}, dtype=float)
result = r.execute().fetch()
expected = raw.map({5: 10})
pd.testing.assert_series_equal(result, expected)
r = s.map({i: 10 + i for i in range(7)}, dtype=float)
result = r.execute().fetch()
expected = raw.map({i: 10 + i for i in range(7)})
pd.testing.assert_series_equal(result, expected)
r = s.map({5: 10}, dtype=float, na_action='ignore')
result = r.execute().fetch()
expected = raw.map({5: 10}, na_action='ignore')
pd.testing.assert_series_equal(result, expected)
# dtype can be inferred
r = s.map({5: 10.})
result = r.execute().fetch()
expected = raw.map({5: 10.})
pd.testing.assert_series_equal(result, expected)
r = s.map(lambda x: x + 1, dtype=int)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
def f(x: int) -> float:
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
def f(x: int):
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series
raw2 = pd.Series([10], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2, dtype=float)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)  # api: pandas.testing.assert_series_equal
######## Libraries ########
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import linear_model
from sklearn import naive_bayes
from sklearn import neighbors
from sklearn import tree
from sklearn import ensemble
from sklearn import svm
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn import metrics
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from sklearn.model_selection import GridSearchCV
import warnings
warnings.filterwarnings(action='ignore')
pd.set_option("max_columns", 50)
####### Data cleaning #########
# Read in data
df = pd.read_csv("../projects/football_pass_model/Sample_Game_1/Sample_Game_1_RawEventsData.csv")
print(df)
# Print types and subtypes
print('Types: ',df.Type.unique(),'\n')
print('Subtypes: ',df.Subtype.unique())
# Look at pass and ball lost types
df_pass = df[df['Type'] =='PASS']
df_lost = df[df['Type'] =='BALL LOST']
print('Pass Subtypes: ',df_pass.Subtype.unique(),'\n')
print('Ball Lost Subtypes: ',df_lost.Subtype.unique())
# Trim ball lost to just relevant subtypes
excluded_subtypes = ['', ' ', 'THEFT', 'HEAD', 'HEAD-FORCED', 'OFFSIDE', 'FORCED', 'END HALF', 'WOODWORK', 'REFEREE HIT']
condition = ~df_lost.Subtype.isin(excluded_subtypes)
df_lost_trimmed = df_lost[condition]
df_lost_trimmed = df_lost_trimmed.dropna(subset=['Subtype'])
print('Pass Subtypes: ',df_pass.Subtype.unique(),'\n')
print('Ball Lost Subtypes: ',df_lost_trimmed.Subtype.unique())
# Put pass and ball lost data back together
pass_data = pd.concat([df_pass, df_lost_trimmed])
# Extra cleaning of various columns
pass_data.rename(columns={'Type': 'pass_sucess'}, inplace=True)
pass_data["pass_sucess"].replace({"PASS": 1, "BALL LOST": 0}, inplace=True)
pass_data.dropna(subset = ["End X"], inplace=True)
pass_data["Subtype"].fillna("STANDARD", inplace=True)
pass_data["Subtype"].replace({"INTERCEPTION": "STANDARD"}, inplace=True)
pass_data["Subtype"].replace({"HEAD-INTERCEPTION": "HEAD"}, inplace=True)
pass_data["Subtype"].replace({"THROUGH BALL-DEEP BALL": "DEEP BALL"}, inplace=True)
pass_data["Subtype"].replace({"CROSS-INTERCEPTION": "CROSS"}, inplace=True)
pass_data["Subtype"].replace({"HEAD-CLEARANCE": "CLEARANCE"}, inplace=True)
pass_data["Subtype"].replace({"GOAL KICK-INTERCEPTION": "GOAL KICK"}, inplace=True)
pass_data["Team"].replace({"Away": 0}, inplace=True)
pass_data["Team"].replace({"Home": 1}, inplace=True)
pass_data.rename(columns={'Team': 'home_team'}, inplace=True)
pass_data["Period"].replace({2: 0}, inplace=True)
pass_data.rename(columns={'Period': 'first_half'}, inplace=True)
print(pass_data)
########### Data exploration ############
# Print distributions of features
pass_data.hist(figsize=(30,20))
plt.show()
pass_data['Subtype'].value_counts().plot(kind='bar')
plt.show()
pass_data['From'].value_counts().plot(kind='bar')
plt.suptitle("'From' feature distribution")
plt.show()
pass_data['To'].value_counts().plot(kind='bar')
plt.suptitle("'To' feature distribution")
plt.show()
####### Feature engineering ##########
# Remove redundant features
pass_data.drop(['Start Frame', 'End Frame', 'To'], axis=1, inplace=True)
# Create new pass_length feature
new_column = pass_data["End Time [s]"] - pass_data["Start Time [s]"]
pass_data["pass_length"] = new_column
pass_data.drop(['End Time [s]'], axis=1, inplace=True)
print(pass_data)
# Split into feature set and dependent variable
x = pass_data.drop(['pass_sucess'], axis=1)
y = pass_data['pass_sucess']
# Deal with categorical variables
cat_features_string = ['Subtype','From']
cat_features = [x.Subtype, x.From]
for i in range(len(cat_features_string)):
x_temp = pd.get_dummies(cat_features[i], prefix=cat_features_string[i])
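# The loop body above is truncated in this excerpt. A common (assumed) continuation is to
# append each block of dummy columns to the feature matrix, then drop the original
# categorical columns once the loop has finished, e.g.:
#     x = pd.concat([x, x_temp], axis=1)
# and, after the loop:
#     x.drop(cat_features_string, axis=1, inplace=True)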
'''Dissimilarity based pseudo within-cluster inertia of a partition'''
import numpy as np
import pandas as pd
def inert_diss(D, indices=None, wt=None):
'''Pseudo inertia of a cluster'''
n = len(D)
if indices is None:
indices = np.array(range(n))
if wt is None:
wt = np.repeat(1/n, n)
if indices.size > 1:
subD = D.iloc[indices, indices]
subW = wt[indices]
mu = sum(subW)
inert = subD.apply(lambda x: (x**2)*subW, axis=0)
inert = inert.apply(lambda x: x*subW, axis=1)
inert = (inert/(2*mu)).to_numpy().sum()
else:
inert = 0
return(inert)
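# Illustrative check with uniform weights: for the 2x2 distance matrix
#     D = pd.DataFrame([[0., 1.], [1., 0.]])
# inert_diss(D) evaluates sum_ij w_i * w_j * d_ij**2 / (2 * mu) with w = [0.5, 0.5]
# and mu = 1, giving 0.25.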
def within_diss(D, labels, wt=None):
'''Dissimilarity based pseudo within-cluster inertia of a partition'''
n = len(D)
k = len(np.unique(labels))
import sys, warnings, operator
import json
import time
import types
import numbers
import inspect
import itertools
import string
import unicodedata
import datetime as dt
from collections import defaultdict, OrderedDict
from contextlib import contextmanager
from distutils.version import LooseVersion as _LooseVersion
from functools import partial
from threading import Thread, Event
from types import FunctionType
import numpy as np
import param
# Python3 compatibility
if sys.version_info.major >= 3:
import builtins as builtins # noqa (compatibility)
if sys.version_info.minor > 3:
from collections.abc import Iterable # noqa (compatibility)
else:
from collections import Iterable # noqa (compatibility)
basestring = str
unicode = str
long = int
cmp = lambda a, b: (a>b)-(a<b)
generator_types = (zip, range, types.GeneratorType)
RecursionError = RecursionError if sys.version_info.minor > 4 else RuntimeError # noqa
_getargspec = inspect.getfullargspec
get_keywords = operator.attrgetter('varkw')
LooseVersion = _LooseVersion
else:
import __builtin__ as builtins # noqa (compatibility)
from collections import Iterable # noqa (compatibility)
basestring = basestring
unicode = unicode
from itertools import izip
generator_types = (izip, xrange, types.GeneratorType) # noqa
RecursionError = RuntimeError
_getargspec = inspect.getargspec
get_keywords = operator.attrgetter('keywords')
class LooseVersion(_LooseVersion):
"""
Subclassed to avoid unicode issues in python2
"""
def __init__ (self, vstring=None):
if isinstance(vstring, unicode):
vstring = str(vstring)
self.parse(vstring)
def __cmp__(self, other):
if isinstance(other, unicode):
other = str(other)
if isinstance(other, basestring):
other = LooseVersion(other)
return cmp(self.version, other.version)
numpy_version = LooseVersion(np.__version__)
param_version = LooseVersion(param.__version__)
datetime_types = (np.datetime64, dt.datetime, dt.date, dt.time)
timedelta_types = (np.timedelta64, dt.timedelta,)
arraylike_types = (np.ndarray,)
masked_types = ()
try:
import pandas as pd
except ImportError:
pd = None
if pd:
pandas_version = LooseVersion(pd.__version__)
try:
if pandas_version >= '0.24.0':
from pandas.core.dtypes.dtypes import DatetimeTZDtype as DatetimeTZDtypeType
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass
elif pandas_version > '0.20.0':
from pandas.core.dtypes.dtypes import DatetimeTZDtypeType
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass
else:
from pandas.types.dtypes import DatetimeTZDtypeType
from pandas.types.dtypes.generic import ABCSeries, ABCIndexClass
pandas_datetime_types = (pd.Timestamp, DatetimeTZDtypeType, pd.Period)
pandas_timedelta_types = (pd.Timedelta,)
datetime_types = datetime_types + pandas_datetime_types
timedelta_types = timedelta_types + pandas_timedelta_types
arraylike_types = arraylike_types + (ABCSeries, ABCIndexClass)
if pandas_version > '0.23.0':
from pandas.core.dtypes.generic import ABCExtensionArray
arraylike_types = arraylike_types + (ABCExtensionArray,)
if pandas_version > '1.0':
from pandas.core.arrays.masked import BaseMaskedArray
masked_types = (BaseMaskedArray,)
except Exception as e:
param.main.param.warning('pandas could not register all extension types '
'imports failed with the following error: %s' % e)
try:
import cftime
cftime_types = (cftime.datetime,)
datetime_types += cftime_types
except:
cftime_types = ()
_STANDARD_CALENDARS = set(['standard', 'gregorian', 'proleptic_gregorian'])
class VersionError(Exception):
"Raised when there is a library version mismatch."
def __init__(self, msg, version=None, min_version=None, **kwargs):
self.version = version
self.min_version = min_version
super(VersionError, self).__init__(msg, **kwargs)
class Config(param.ParameterizedFunction):
"""
Set of boolean configuration values to change HoloViews' global
behavior. Typically used to control warnings relating to
deprecations or set global parameter such as style 'themes'.
"""
future_deprecations = param.Boolean(default=False, doc="""
Whether to warn about future deprecations""")
image_rtol = param.Number(default=10e-4, doc="""
The tolerance used to enforce regular sampling for regular,
gridded data where regular sampling is expected. Expressed as the
maximal allowable sampling difference between sample
locations.""")
no_padding = param.Boolean(default=False, doc="""
Disable default padding (introduced in 1.13.0).""")
warn_options_call = param.Boolean(default=True, doc="""
Whether to warn when the deprecated __call__ options syntax is
used (the opts method should now be used instead). It is
recommended that users switch this on to update any uses of
__call__ as it will be deprecated in future.""")
default_cmap = param.String(default='kbc_r', doc="""
Global default colormap. Prior to HoloViews 1.14.0, the default
value was 'fire' which can be set for backwards compatibility.""")
default_gridded_cmap = param.String(default='kbc_r', doc="""
Global default colormap for gridded elements (i.e. Image, Raster
and QuadMesh). Can be set to 'fire' to match raster defaults
prior to HoloViews 1.14.0 while allowing the default_cmap to be
the value of 'kbc_r' used in HoloViews >= 1.14.0""")
default_heatmap_cmap = param.String(default='kbc_r', doc="""
Global default colormap for HeatMap elements. Prior to HoloViews
1.14.0, the default value was the 'RdYlBu_r' colormap.""")
def __call__(self, **params):
self.param.set_param(**params)
return self
config = Config()
class HashableJSON(json.JSONEncoder):
"""
Extends JSONEncoder to generate a hashable string for as many types
of object as possible including nested objects and objects that are
not normally hashable. The purpose of this class is to generate
unique strings that once hashed are suitable for use in memoization
and other cases where deep equality must be tested without storing
the entire object.
By default JSONEncoder supports booleans, numbers, strings, lists,
tuples and dictionaries. In order to support other types such as
sets, datetime objects and mutable objects such as pandas Dataframes
or numpy arrays, HashableJSON has to convert these types to
datastructures that can normally be represented as JSON.
Support for other object types may need to be introduced in
future. By default, unrecognized object types are represented by
their id.
One limitation of this approach is that dictionaries with composite
keys (e.g. tuples) are not supported due to the JSON spec.
"""
string_hashable = (dt.datetime,)
repr_hashable = ()
def default(self, obj):
if isinstance(obj, set):
return hash(frozenset(obj))
elif isinstance(obj, np.ndarray):
return obj.tolist()
if pd and isinstance(obj, (pd.Series, pd.DataFrame)):
return obj.to_csv(header=True).encode('utf-8')
elif isinstance(obj, self.string_hashable):
return str(obj)
elif isinstance(obj, self.repr_hashable):
return repr(obj)
try:
return hash(obj)
except:
return id(obj)
def merge_option_dicts(old_opts, new_opts):
"""
Update the old_opts option dictionary with the options defined in
new_opts. Instead of a shallow update as would be performed by calling
old_opts.update(new_opts), this updates the dictionaries of all option
types separately.
Given two dictionaries
old_opts = {'a': {'x': 'old', 'y': 'old'}}
and
new_opts = {'a': {'y': 'new', 'z': 'new'}, 'b': {'k': 'new'}}
this returns a dictionary
{'a': {'x': 'old', 'y': 'new', 'z': 'new'}, 'b': {'k': 'new'}}
"""
merged = dict(old_opts)
for option_type, options in new_opts.items():
if option_type not in merged:
merged[option_type] = {}
merged[option_type].update(options)
return merged
def merge_options_to_dict(options):
"""
Given a collection of Option objects or partial option dictionaries,
merge everything to a single dictionary.
"""
merged_options = {}
for obj in options:
if isinstance(obj,dict):
new_opts = obj
else:
new_opts = {obj.key: obj.kwargs}
merged_options = merge_option_dicts(merged_options, new_opts)
return merged_options
def deprecated_opts_signature(args, kwargs):
"""
Utility to help with the deprecation of the old .opts method signature
Returns whether opts.apply_groups should be used (as a bool) and the
corresponding options.
"""
from .options import Options
groups = set(Options._option_groups)
opts = {kw for kw in kwargs if kw != 'clone'}
apply_groups = False
options = None
new_kwargs = {}
if len(args) > 0 and isinstance(args[0], dict):
apply_groups = True
if (not set(args[0]).issubset(groups) and
all(isinstance(v, dict) and not set(v).issubset(groups)
for v in args[0].values())):
apply_groups = False
elif set(args[0].keys()) <= groups:
new_kwargs = args[0]
else:
options = args[0]
elif opts and opts.issubset(set(groups)):
apply_groups = True
elif kwargs.get('options', None) is not None:
apply_groups = True
elif not args and not kwargs:
apply_groups = True
return apply_groups, options, new_kwargs
class periodic(Thread):
"""
Run a callback count times with a given period without blocking.
If count is None, will run till timeout (which may be forever if None).
"""
def __init__(self, period, count, callback, timeout=None, block=False):
if isinstance(count, int):
if count < 0: raise ValueError('Count value must be positive')
elif not type(count) is type(None):
raise ValueError('Count value must be a positive integer or None')
if block is False and count is None and timeout is None:
raise ValueError('When using a non-blocking thread, please specify '
'either a count or a timeout')
super(periodic, self).__init__()
self.period = period
self.callback = callback
self.count = count
self.counter = 0
self.block = block
self.timeout = timeout
self._completed = Event()
self._start_time = None
@property
def completed(self):
return self._completed.is_set()
def start(self):
self._start_time = time.time()
if self.block is False:
super(periodic,self).start()
else:
self.run()
def stop(self):
self.timeout = None
self._completed.set()
def __repr__(self):
return 'periodic(%s, %s, %s)' % (self.period,
self.count,
callable_name(self.callback))
def __str__(self):
return repr(self)
def run(self):
while not self.completed:
if self.block:
time.sleep(self.period)
else:
self._completed.wait(self.period)
self.counter += 1
try:
self.callback(self.counter)
except Exception:
self.stop()
if self.timeout is not None:
dt = (time.time() - self._start_time)
if dt > self.timeout:
self.stop()
if self.counter == self.count:
self.stop()
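# Example usage (illustrative): run a callback three times, once every half second,
# blocking the caller until it completes:
#     periodic(0.5, 3, lambda i: print('tick', i), block=True).start()
# With block=False (the default) the same call returns immediately and the callback
# runs on a background thread instead.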
def deephash(obj):
"""
Given an object, return a hash using HashableJSON. This hash is not
architecture, Python version or platform independent.
"""
try:
return hash(json.dumps(obj, cls=HashableJSON, sort_keys=True))
except:
return None
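# Illustrative usage: deephash({'a': [1, 2], 'b': {3, 4}}) returns an integer hash of the
# JSON-serialised object, or None if HashableJSON cannot serialise it.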
def tree_attribute(identifier):
"""
Predicate that returns True for custom attributes added to AttrTrees
that are not methods, properties or internal attributes.
These custom attributes start with a capitalized character when
applicable (not applicable to underscore or certain unicode characters)
"""
if identifier[0].upper().isupper() is False and identifier[0] != '_':
return True
else:
return identifier[0].isupper()
def argspec(callable_obj):
"""
Returns an ArgSpec object for functions, staticmethods, instance
methods, classmethods and partials.
Note that the args list for instance and class methods are those as
seen by the user. In other words, the first argument which is
conventionally called 'self' or 'cls' is omitted in these cases.
"""
if (isinstance(callable_obj, type)
and issubclass(callable_obj, param.ParameterizedFunction)):
# Parameterized function.__call__ considered function in py3 but not py2
spec = _getargspec(callable_obj.__call__)
args = spec.args[1:]
elif inspect.isfunction(callable_obj): # functions and staticmethods
spec = _getargspec(callable_obj)
args = spec.args
elif isinstance(callable_obj, partial): # partials
arglen = len(callable_obj.args)
spec = _getargspec(callable_obj.func)
args = [arg for arg in spec.args[arglen:] if arg not in callable_obj.keywords]
elif inspect.ismethod(callable_obj): # instance and class methods
spec = _getargspec(callable_obj)
args = spec.args[1:]
else: # callable objects
return argspec(callable_obj.__call__)
return inspect.ArgSpec(args=args,
varargs=spec.varargs,
keywords=get_keywords(spec),
defaults=spec.defaults)
def validate_dynamic_argspec(callback, kdims, streams):
"""
Utility used by DynamicMap to ensure the supplied callback has an
appropriate signature.
If validation succeeds, returns a list of strings to be zipped with
the positional arguments, i.e. kdim values. The zipped values can then
be merged with the stream values to pass everything to the Callable
as keywords.
If the callbacks use *args, None is returned to indicate that kdim
values must be passed to the Callable by position. In this
situation, Callable passes *args and **kwargs directly to the
callback.
If the callback doesn't use **kwargs, the accepted keywords are
validated against the stream parameter names.
"""
argspec = callback.argspec
name = callback.name
kdims = [kdim.name for kdim in kdims]
stream_params = stream_parameters(streams)
defaults = argspec.defaults if argspec.defaults else []
all_posargs = argspec.args[:-len(defaults)] if defaults else argspec.args
# Filter out any posargs for streams
posargs = [arg for arg in all_posargs if arg not in stream_params]
kwargs = argspec.args[-len(defaults):]
if argspec.keywords is None:
unassigned_streams = set(stream_params) - set(argspec.args)
if unassigned_streams:
unassigned = ','.join(unassigned_streams)
raise KeyError('Callable {name!r} missing keywords to '
'accept stream parameters: {unassigned}'.format(name=name,
unassigned=unassigned))
if len(posargs) > len(kdims) + len(stream_params):
raise KeyError('Callable {name!r} accepts more positional arguments than '
'there are kdims and stream parameters'.format(name=name))
if kdims == []: # Can be no posargs, stream kwargs already validated
return []
if set(kdims) == set(posargs): # Posargs match exactly, can all be passed as kwargs
return kdims
elif len(posargs) == len(kdims): # Posargs match kdims length, supplying names
if argspec.args[:len(kdims)] != posargs:
raise KeyError('Unmatched positional kdim arguments only allowed at '
'the start of the signature of {name!r}'.format(name=name))
return posargs
elif argspec.varargs: # Posargs missing, passed to Callable directly
return None
elif set(posargs) - set(kdims):
raise KeyError('Callable {name!r} accepts more positional arguments {posargs} '
'than there are key dimensions {kdims}'.format(name=name,
posargs=posargs,
kdims=kdims))
elif set(kdims).issubset(set(kwargs)): # Key dims can be supplied by keyword
return kdims
elif set(kdims).issubset(set(posargs+kwargs)):
return kdims
elif argspec.keywords:
return kdims
else:
raise KeyError('Callback {name!r} signature over {names} does not accommodate '
'required kdims {kdims}'.format(name=name,
names=list(set(posargs+kwargs)),
kdims=kdims))
def callable_name(callable_obj):
"""
Attempt to return a meaningful name identifying a callable or generator
"""
try:
if (isinstance(callable_obj, type)
and issubclass(callable_obj, param.ParameterizedFunction)):
return callable_obj.__name__
elif (isinstance(callable_obj, param.Parameterized)
and 'operation' in callable_obj.param):
return callable_obj.operation.__name__
elif isinstance(callable_obj, partial):
return str(callable_obj)
elif inspect.isfunction(callable_obj): # functions and staticmethods
return callable_obj.__name__
elif inspect.ismethod(callable_obj): # instance and class methods
meth = callable_obj
if sys.version_info < (3,0):
owner = meth.im_class if meth.im_self is None else meth.im_self
if meth.__name__ == '__call__':
return type(owner).__name__
return '.'.join([owner.__name__, meth.__name__])
else:
return meth.__func__.__qualname__.replace('.__call__', '')
elif isinstance(callable_obj, types.GeneratorType):
return callable_obj.__name__
else:
return type(callable_obj).__name__
except Exception:
return str(callable_obj)
def process_ellipses(obj, key, vdim_selection=False):
"""
Helper function to pad a __getitem__ key with the right number of
empty slices (i.e. :) when the key contains an Ellipsis (...).
If the vdim_selection flag is true, check if the end of the key
contains strings or Dimension objects in obj. If so, extra padding
will not be applied for the value dimensions (i.e. the resulting key
will be exactly one longer than the number of kdims). Note: this
flag should not be used for composite types.
"""
if getattr(getattr(key, 'dtype', None), 'kind', None) == 'b':
return key
wrapped_key = wrap_tuple(key)
ellipse_count = sum(1 for k in wrapped_key if k is Ellipsis)
if ellipse_count == 0:
return key
elif ellipse_count != 1:
raise Exception("Only one ellipsis allowed at a time.")
dim_count = len(obj.dimensions())
index = wrapped_key.index(Ellipsis)
head = wrapped_key[:index]
tail = wrapped_key[index+1:]
padlen = dim_count - (len(head) + len(tail))
if vdim_selection:
# If the end of the key (i.e. the tail) is in vdims, pad to len(kdims)+1
if wrapped_key[-1] in obj.vdims:
padlen = (len(obj.kdims) +1 ) - len(head+tail)
return head + ((slice(None),) * padlen) + tail
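# Illustrative example: for an element with kdims ['x', 'y'] and vdim 'z' (three
# dimensions in total), the key (..., 0.5) is padded to
# (slice(None), slice(None), 0.5), i.e. the scalar is kept for the last dimension.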
def bytes_to_unicode(value):
"""
Safely casts bytestring to unicode
"""
if isinstance(value, bytes):
return unicode(value.decode('utf-8'))
return value
def get_method_owner(method):
"""
Gets the instance that owns the supplied method
"""
if isinstance(method, partial):
method = method.func
return method.__self__ if sys.version_info.major >= 3 else method.im_self
def capitalize_unicode_name(s):
"""
Turns a string such as 'capital delta' into the shortened,
capitalized version, in this case simply 'Delta'. Used as a
transform in sanitize_identifier.
"""
index = s.find('capital')
if index == -1: return s
tail = s[index:].replace('capital', '').strip()
tail = tail[0].upper() + tail[1:]
return s[:index] + tail
class sanitize_identifier_fn(param.ParameterizedFunction):
"""
Sanitizes group/label values for use in AttrTree attribute
access. Depending on the version parameter, either sanitization
appropriate for Python 2 (no unicode identifiers allowed) or
Python 3 (some unicode allowed) is used.
Note that if you are using Python 3, you can switch to version 2
for compatibility but you cannot enable relaxed sanitization if
you are using Python 2.
Special characters are sanitized using their (lowercase) unicode
name using the unicodedata module. For instance:
>>> unicodedata.name(u'$').lower()
'dollar sign'
As these names are often very long, this parameterized function
allows filtered, substitutions and transforms to help shorten these
names appropriately.
"""
version = param.ObjectSelector(sys.version_info.major, objects=[2,3], doc="""
The sanitization version. If set to 2, more aggressive
sanitization appropriate for Python 2 is applied. Otherwise,
if set to 3, more relaxed, Python 3 sanitization is used.""")
capitalize = param.Boolean(default=True, doc="""
Whether the first letter should be converted to
uppercase. Note, this will only be applied to ASCII characters
in order to make sure paths aren't confused with method
names.""")
eliminations = param.List(['extended', 'accent', 'small', 'letter', 'sign', 'digit',
'latin', 'greek', 'arabic-indic', 'with', 'dollar'], doc="""
Lowercase strings to be eliminated from the unicode names in
order to shorten the sanitized name ( lowercase). Redundant
strings should be removed but too much elimination could cause
two unique strings to map to the same sanitized output.""")
substitutions = param.Dict(default={'circumflex':'power',
'asterisk':'times',
'solidus':'over'}, doc="""
Lowercase substitutions of substrings in unicode names. For
instance the ^ character has the name 'circumflex accent' even
though it is more typically used for exponentiation. Note that
substitutions occur after filtering and that there should be no
ordering dependence between substitutions.""")
transforms = param.List(default=[capitalize_unicode_name], doc="""
List of string transformation functions to apply after
filtering and substitution in order to further compress the
unicode name. For instance, the default capitalize_unicode_name
function will turn the string "capital delta" into "Delta".""")
disallowed = param.List(default=['trait_names', '_ipython_display_',
'_getAttributeNames'], doc="""
An explicit list of name that should not be allowed as
attribute names on Tree objects.
By default, prevents IPython from creating an entry called
Trait_names due to an inconvenient getattr check (during
tab-completion).""")
disable_leading_underscore = param.Boolean(default=False, doc="""
Whether leading underscores should be allowed to be sanitized
with the leading prefix.""")
aliases = param.Dict(default={}, doc="""
A dictionary of aliases mapping long strings to their short,
sanitized equivalents""")
prefix = 'A_'
_lookup_table = param.Dict(default={}, doc="""
Cache of previously computed sanitizations""")
@param.parameterized.bothmethod
def add_aliases(self_or_cls, **kwargs):
"""
Conveniently add new aliases as keyword arguments. For instance
you can add a new alias with add_aliases(short='Longer string')
"""
self_or_cls.aliases.update({v:k for k,v in kwargs.items()})
@param.parameterized.bothmethod
def remove_aliases(self_or_cls, aliases):
"""
Remove a list of aliases.
"""
for k,v in self_or_cls.aliases.items():
if v in aliases:
self_or_cls.aliases.pop(k)
@param.parameterized.bothmethod
def allowable(self_or_cls, name, disable_leading_underscore=None):
disabled_reprs = ['javascript', 'jpeg', 'json', 'latex',
'latex', 'pdf', 'png', 'svg', 'markdown']
disabled_ = (self_or_cls.disable_leading_underscore
if disable_leading_underscore is None
else disable_leading_underscore)
if disabled_ and name.startswith('_'):
return False
isrepr = any(('_repr_%s_' % el) == name for el in disabled_reprs)
return (name not in self_or_cls.disallowed) and not isrepr
@param.parameterized.bothmethod
def prefixed(self, identifier, version):
"""
Whether or not the identifier will be prefixed.
Strings that require the prefix are generally not recommended.
"""
invalid_starting = ['Mn', 'Mc', 'Nd', 'Pc']
if identifier.startswith('_'): return True
return((identifier[0] in string.digits) if version==2
else (unicodedata.category(identifier[0]) in invalid_starting))
@param.parameterized.bothmethod
def remove_diacritics(self_or_cls, identifier):
"""
Remove diacritics and accents from the input leaving other
unicode characters alone."""
chars = ''
for c in identifier:
replacement = unicodedata.normalize('NFKD', c).encode('ASCII', 'ignore')
if replacement != '':
chars += bytes_to_unicode(replacement)
else:
chars += c
return chars
@param.parameterized.bothmethod
def shortened_character_name(self_or_cls, c, eliminations=[], substitutions={}, transforms=[]):
"""
Given a unicode character c, return the shortened unicode name
(as a list of tokens) by applying the eliminations,
substitutions and transforms.
"""
name = unicodedata.name(c).lower()
# Filtering
for elim in eliminations:
name = name.replace(elim, '')
# Substitution
for i,o in substitutions.items():
name = name.replace(i, o)
for transform in transforms:
name = transform(name)
return ' '.join(name.strip().split()).replace(' ','_').replace('-','_')
def __call__(self, name, escape=True, version=None):
if name in [None, '']:
return name
elif name in self.aliases:
return self.aliases[name]
elif name in self._lookup_table:
return self._lookup_table[name]
name = bytes_to_unicode(name)
version = self.version if version is None else version
if not self.allowable(name):
raise AttributeError("String %r is in the disallowed list of attribute names: %r" % (name, self.disallowed))
if version == 2:
name = self.remove_diacritics(name)
if self.capitalize and name and name[0] in string.ascii_lowercase:
name = name[0].upper()+name[1:]
sanitized = (self.sanitize_py2(name) if version==2 else self.sanitize_py3(name))
if self.prefixed(name, version):
sanitized = self.prefix + sanitized
self._lookup_table[name] = sanitized
return sanitized
def _process_underscores(self, tokens):
"Strip underscores to make sure the number is correct after join"
groups = [[str(''.join(el))] if b else list(el)
for (b,el) in itertools.groupby(tokens, lambda k: k=='_')]
flattened = [el for group in groups for el in group]
processed = []
for token in flattened:
if token == '_': continue
if token.startswith('_'):
token = str(token[1:])
if token.endswith('_'):
token = str(token[:-1])
processed.append(token)
return processed
def sanitize_py2(self, name):
# This fix works but masks an issue in self.sanitize (py2)
prefix = '_' if name.startswith('_') else ''
valid_chars = string.ascii_letters+string.digits+'_'
return prefix + str('_'.join(self.sanitize(name, lambda c: c in valid_chars)))
def sanitize_py3(self, name):
if not name.isidentifier():
return '_'.join(self.sanitize(name, lambda c: ('_'+c).isidentifier()))
else:
return name
def sanitize(self, name, valid_fn):
"Accumulate blocks of hex and separate blocks by underscores"
invalid = {'\a':'a','\b':'b', '\v':'v','\f':'f','\r':'r'}
for cc in filter(lambda el: el in name, invalid.keys()):
raise Exception("Please use a raw string or escape control code '\%s'"
% invalid[cc])
sanitized, chars = [], ''
for split in name.split():
for c in split:
if valid_fn(c): chars += str(c) if c=='_' else c
else:
short = self.shortened_character_name(c, self.eliminations,
self.substitutions,
self.transforms)
sanitized.extend([chars] if chars else [])
if short != '':
sanitized.append(short)
chars = ''
if chars:
sanitized.extend([chars])
chars=''
return self._process_underscores(sanitized + ([chars] if chars else []))
sanitize_identifier = sanitize_identifier_fn.instance()
group_sanitizer = sanitize_identifier_fn.instance()
label_sanitizer = sanitize_identifier_fn.instance()
dimension_sanitizer = sanitize_identifier_fn.instance(capitalize=False)
def isscalar(val):
"""
Value is scalar or None
"""
return val is None or np.isscalar(val) or isinstance(val, datetime_types)
def isnumeric(val):
if isinstance(val, (basestring, bool, np.bool_)):
return False
try:
float(val)
return True
except:
return False
def asarray(arraylike, strict=True):
"""
Converts arraylike objects to NumPy ndarray types. Errors if
object is not arraylike and strict option is enabled.
"""
if isinstance(arraylike, np.ndarray):
return arraylike
elif isinstance(arraylike, list):
return np.asarray(arraylike, dtype=object)
elif not isinstance(arraylike, np.ndarray) and isinstance(arraylike, arraylike_types):
return arraylike.values
elif hasattr(arraylike, '__array__'):
return np.asarray(arraylike)
elif strict:
raise ValueError('Could not convert %s type to array' % type(arraylike))
return arraylike
nat_as_integer = np.datetime64('NAT').view('i8')
def isnat(val):
"""
Checks if the value is a NaT. Should only be called on datetimelike objects.
"""
if (isinstance(val, (np.datetime64, np.timedelta64)) or
(isinstance(val, np.ndarray) and val.dtype.kind == 'M')):
if numpy_version >= '1.13':
return np.isnat(val)
else:
return val.view('i8') == nat_as_integer
elif pd and val is pd.NaT:
return True
elif pd and isinstance(val, pandas_datetime_types+pandas_timedelta_types):
return pd.isna(val)
else:
return False
def isfinite(val):
"""
Helper function to determine if scalar or array value is finite extending
np.isfinite with support for None, string, datetime types.
"""
is_dask = is_dask_array(val)
if not np.isscalar(val) and not is_dask:
if isinstance(val, np.ma.core.MaskedArray):
return ~val.mask & isfinite(val.data)
elif isinstance(val, masked_types):
return ~val.isna() & isfinite(val._data)
val = asarray(val, strict=False)
if val is None:
return False
elif is_dask:
import dask.array as da
return da.isfinite(val)
elif isinstance(val, np.ndarray):
if val.dtype.kind == 'M':
return ~isnat(val)
elif val.dtype.kind == 'O':
return np.array([isfinite(v) for v in val], dtype=bool)
elif val.dtype.kind in 'US':
return ~pd.isna(val) if pd else np.ones_like(val, dtype=bool)
finite = np.isfinite(val)
if pd and pandas_version >= '1.0.0':
finite &= ~pd.isna(val)
return finite
elif isinstance(val, datetime_types+timedelta_types):
return not isnat(val)
elif isinstance(val, (basestring, bytes)):
return True
finite = np.isfinite(val)
if pd and pandas_version >= '1.0.0':
if finite is pd.NA:
return False
return finite & (~pd.isna(val))
return finite
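# Illustrative behaviour:
#     isfinite(None)                            -> False
#     isfinite('some label')                    -> True   (strings are treated as finite)
#     isfinite(np.array([1.0, np.nan, np.inf])) -> array([ True, False, False])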
def isdatetime(value):
"""
Whether the array or scalar is recognized datetime type.
"""
if isinstance(value, np.ndarray):
return (value.dtype.kind == "M" or
(value.dtype.kind == "O" and len(value) and
isinstance(value[0], datetime_types)))
else:
return isinstance(value, datetime_types)
def find_minmax(lims, olims):
"""
Takes (a1, a2) and (b1, b2) as input and returns
(np.nanmin(a1, b1), np.nanmax(a2, b2)). Used to calculate
min and max values of a number of items.
"""
try:
limzip = zip(list(lims), list(olims), [np.nanmin, np.nanmax])
limits = tuple([float(fn([l, ol])) for l, ol, fn in limzip])
except:
limits = (np.NaN, np.NaN)
return limits
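# Illustrative example: find_minmax((0, 3), (1, 5)) -> (0.0, 5.0)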
def find_range(values, soft_range=[]):
"""
Safely finds either the numerical min and max of
a set of values, falling back to the first and
the last value in the sorted list of values.
"""
try:
values = np.array(values)
values = np.squeeze(values) if len(values.shape) > 1 else values
if len(soft_range):
values = np.concatenate([values, soft_range])
if values.dtype.kind == 'M':
return values.min(), values.max()
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
return np.nanmin(values), np.nanmax(values)
except:
try:
values = sorted(values)
return (values[0], values[-1])
except:
return (None, None)
def max_range(ranges, combined=True):
"""
Computes the maximal lower and upper bounds from a list bounds.
Args:
ranges (list of tuples): A list of range tuples
combined (boolean, optional): Whether to combine bounds
Whether range should be computed on lower and upper bound
independently or both at once
Returns:
The maximum range as a single tuple
"""
try:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
values = [tuple(np.NaN if v is None else v for v in r) for r in ranges]
if pd and any(isinstance(v, datetime_types) and not isinstance(v, cftime_types+(dt.time,))
for r in values for v in r):
converted = []
for l, h in values:
if isinstance(l, datetime_types) and isinstance(h, datetime_types):
l, h = (pd.Timestamp(l).to_datetime64(),
pd.Timestamp(h).to_datetime64())
converted.append((l, h))
values = converted
arr = np.array(values)
if not len(arr):
return np.NaN, np.NaN
elif arr.dtype.kind in 'OSU':
arr = list(python2sort([
v for r in values for v in r
if not is_nan(v) and v is not None]))
return arr[0], arr[-1]
elif arr.dtype.kind in 'M':
drange = ((arr.min(), arr.max()) if combined else
(arr[:, 0].min(), arr[:, 1].max()))
return drange
if combined:
return (np.nanmin(arr), np.nanmax(arr))
else:
return (np.nanmin(arr[:, 0]), np.nanmax(arr[:, 1]))
except:
return (np.NaN, np.NaN)
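# Illustrative example: None bounds are treated as NaN and ignored, so
#     max_range([(0, 1), (0.5, 3), (None, 4)]) -> (0.0, 4.0)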
def range_pad(lower, upper, padding=None, log=False):
"""
Pads the range by a fraction of the interval
"""
if padding is not None and not isinstance(padding, tuple):
padding = (padding, padding)
if is_number(lower) and is_number(upper) and padding is not None:
if not isinstance(lower, datetime_types) and log and lower > 0 and upper > 0:
log_min = np.log(lower) / np.log(10)
log_max = np.log(upper) / np.log(10)
lspan = (log_max-log_min)*(1+padding[0]*2)
uspan = (log_max-log_min)*(1+padding[1]*2)
center = (log_min+log_max) / 2.0
start, end = np.power(10, center-lspan/2.), np.power(10, center+uspan/2.)
else:
if isinstance(lower, datetime_types) and not isinstance(lower, cftime_types):
# Ensure timedelta can be safely divided
lower, upper = np.datetime64(lower), np.datetime64(upper)
span = (upper-lower).astype('>m8[ns]')
else:
span = (upper-lower)
lpad = span*(padding[0])
upad = span*(padding[1])
start, end = lower-lpad, upper+upad
else:
start, end = lower, upper
return start, end
def dimension_range(lower, upper, hard_range, soft_range, padding=None, log=False):
"""
Computes the range along a dimension by combining the data range
with the Dimension soft_range and range.
"""
plower, pupper = range_pad(lower, upper, padding, log)
if isfinite(soft_range[0]) and soft_range[0] <= lower:
lower = soft_range[0]
else:
lower = max_range([(plower, None), (soft_range[0], None)])[0]
if isfinite(soft_range[1]) and soft_range[1] >= upper:
upper = soft_range[1]
else:
upper = max_range([(None, pupper), (None, soft_range[1])])[1]
dmin, dmax = hard_range
lower = lower if dmin is None or not isfinite(dmin) else dmin
upper = upper if dmax is None or not isfinite(dmax) else dmax
return lower, upper
def max_extents(extents, zrange=False):
"""
Computes the maximal extent in 2D and 3D space from
list of 4-tuples or 6-tuples. If zrange is enabled
all extents are converted to 6-tuples to compute
x-, y- and z-limits.
"""
if zrange:
num = 6
inds = [(0, 3), (1, 4), (2, 5)]
extents = [e if len(e) == 6 else (e[0], e[1], None,
e[2], e[3], None)
for e in extents]
else:
num = 4
inds = [(0, 2), (1, 3)]
arr = list(zip(*extents)) if extents else []
extents = [np.NaN] * num
if len(arr) == 0:
return extents
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
for lidx, uidx in inds:
lower = [v for v in arr[lidx] if v is not None and not is_nan(v)]
upper = [v for v in arr[uidx] if v is not None and not is_nan(v)]
if lower and isinstance(lower[0], datetime_types):
extents[lidx] = np.min(lower)
elif any(isinstance(l, basestring) for l in lower):
extents[lidx] = np.sort(lower)[0]
elif lower:
extents[lidx] = np.nanmin(lower)
if upper and isinstance(upper[0], datetime_types):
extents[uidx] = np.max(upper)
elif any(isinstance(u, basestring) for u in upper):
extents[uidx] = np.sort(upper)[-1]
elif upper:
extents[uidx] = np.nanmax(upper)
return tuple(extents)
def int_to_alpha(n, upper=True):
"Generates alphanumeric labels of form A-Z, AA-ZZ etc."
casenum = 65 if upper else 97
label = ''
count= 0
if n == 0: return str(chr(n + casenum))
while n >= 0:
mod, div = n % 26, n
for _ in range(count):
div //= 26
div %= 26
if count == 0:
val = mod
else:
val = div
label += str(chr(val + casenum))
count += 1
n -= 26**count
return label[::-1]
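# Illustrative examples: int_to_alpha(0) -> 'A', int_to_alpha(25) -> 'Z',
# int_to_alpha(26) -> 'AA'; pass upper=False for lowercase labels.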
def int_to_roman(input):
if type(input) != type(1):
raise TypeError("expected integer, got %s" % type(input))
if not 0 < input < 4000:
raise ValueError("Argument must be between 1 and 3999")
ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
nums = ('M', 'CM', 'D', 'CD','C', 'XC','L','XL','X','IX','V','IV','I')
result = ""
for i in range(len(ints)):
count = int(input / ints[i])
result += nums[i] * count
input -= ints[i] * count
return result
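# Illustrative example: int_to_roman(2024) -> 'MMXXIV'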
def unique_iterator(seq):
"""
Returns an iterator containing all non-duplicate elements
in the input sequence.
"""
seen = set()
for item in seq:
if item not in seen:
seen.add(item)
yield item
def lzip(*args):
"""
zip function that returns a list.
"""
return list(zip(*args))
def unique_zip(*args):
"""
Returns a unique list of zipped values.
"""
return list(unique_iterator(zip(*args)))
def unique_array(arr):
"""
Returns an array of unique values in the input order.
Args:
arr (np.ndarray or list): The array to compute unique values on
Returns:
A new array of unique values
"""
if not len(arr):
return np.asarray(arr)
elif pd:
if isinstance(arr, np.ndarray) and arr.dtype.kind not in 'MO':
# Avoid expensive unpacking if not potentially datetime
return pd.unique(arr)
values = []
for v in arr:
if (isinstance(v, datetime_types) and
not isinstance(v, cftime_types)):
v = pd.Timestamp(v).to_datetime64()
values.append(v)
return pd.unique(values)
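# Illustrative example (when pandas is available): first-occurrence order is preserved, e.g.
#     unique_array([1, 3, 1, 2]) -> array([1, 3, 2])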
import pandas as pd
import numpy as np
import seaborn as sns
import datetime
from scipy import stats
import matplotlib.pyplot as plt
# Importing the dataset. ***** Important: all data was cleaned beforehand using SQL Server Business Intelligence tools and SQL to fill the missing data with the mean value of the columns.
# Rows that contained categorical data in a numerical column were also replaced with the mean of that column.
# Some fare amounts were 0.00, which could be free rides ("I guess"), so those were replaced as well.
# Nothing was done with trip_distance = 0, although deleting those rows was considered.
# Read the four monthly files and combine them into a single DataFrame
monthly_files = ['yellow_tripdata_2018-01.csv', 'yellow_tripdata_2018-02.csv',
                 'yellow_tripdata_2018-03.csv', 'yellow_tripdata_2018-04.csv']
dataset_Final = pd.concat((pd.read_csv(f) for f in monthly_files), ignore_index=True)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
class DataLoader:
path = '../data/'
# function for loading data from disk
@classmethod
def load_data(self):
"""
this function is responsible for loading training data from disk,
and performs some basic operations like
- one-hot encoding
- feature scaling
- reshaping data
Parameters:
(no-parameters)
Returns:
X : numpy array
y : numpy array
"""
if(not Path(self.path+'train.csv').is_file()):
print("[util]: train data not found at '",self.path,"'")
#quit()
print("[util]: Loading '",self.path+'train.csv',"'")
train_df = pd.read_csv(self.path+'train.csv')
y = np.array(pd.get_dummies(train_df['label']))
X = train_df.drop(['label'], axis=1)
X = np.array(X)
X = X.reshape(X.shape[0],28,28)
y = y.reshape(y.shape + (1,))
del train_df
return X, y
@classmethod
def load_test(self):
"""
this function is responsible for loading test data from disk.
and reshapes the data into image-shaped arrays
Parameters:
(no-parameters)
Returns:
test_x : numpy array
"""
if(not Path(self.path+'test.csv').is_file()):
print("[util]: test data not found at '",self.path,"'")
#quit()
print("[util]: Loading '",self.path+'test.csv',"'")
test_df = pd.read_csv(self.path+'test.csv')
test_x = np.array(test_df)
test_x = test_x.reshape(test_x.shape[0],28,28)
del test_df
return test_x
# custom function for saving kaggle test data predictions
@classmethod
def save_predictions(self, preds, filename='new_submission.csv'):
"""
this function is responsible for saving test predictions to given filename.
Parameters:
preds : numpy array (all the predictions of test set)
filename: str (filename for saving & identifying different test predictions)
Returns:
(no-returns)
"""
sub_path = self.path+'sample_submission.csv'
if(not Path(sub_path).is_file()):
print("[util]: sample_submission file not found at '",sub_path,"',\n\t it is required to get submission format")
submission = pd.read_csv(sub_path)
"""
Active Fairness Run through questions
"""
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.calibration import _SigmoidCalibration
from sklearn.isotonic import IsotonicRegression
from joblib import Parallel, delayed
import pathos.multiprocessing as multiprocessing
from sklearn.model_selection import train_test_split
from numpy import genfromtxt
import numpy as np
from collections import Counter
import pandas as pd
import time
import random
from copy import deepcopy
class TreeNode:
'''
A node in the "featured tree"
'''
def __init__(self, threshold, dummy = False):
'''
threshold: The threshold of this node
dummy: whether it's a fake node or not (The fake node can only be the root node of the tree)
'''
self.children_left = [] # nodes in its left (and of lower level in original tree)
self.children_right = [] # nodes in its right (and of lower level in original tree)
self.threshold = threshold
self.node_set = [set(), set()] # set of leaf nodes in its left and right,
# self.node_set[0] are the nodes in the left
# self.node_set[1] are the nodes in the right
self.dummy = dummy
class TreeProcess:
def __init__(self, tree, all_features):
'''
tree: the tree trained by random forest
all_features: all possible features in this tree
'''
rootNode = 0
node_trace = []
self.children_left = tree.children_left
self.children_right = tree.children_right
child_left_dict = {}
child_right_dict = {}
for i in range(len(self.children_left)):
child_left_dict[i] = self.children_left[i]
for i in range(len(self.children_right)):
child_right_dict[i] = self.children_right[i]
self.threshold = tree.threshold
self.feature = tree.feature
self.values = tree.value
self.weighted_samples = tree.weighted_n_node_samples
# children_left, children_right, threshold, feature, values, weighted_samples used as a dict. Can provide corresponding value given an index of that node.
self.total_leaf_id = set() # ids of all leaves in this tree
self.feature2nodes = {} # dict, key is the name of features, value is the TreeNode object of the root for that 'feature tree'
self.nodeid2TreeNode = {} # dict, key is the id of nodes in original tree, value is the TreeNode object corresponds to that node
self.feature2threshold_list = {} # dict, key is name of features, value is a list of all thresholds for that feature
self.featureAndthreshold2delete_set = {} # dict, key is name of features, value is another dict, with key as threshold value, and value as a set of leaf node ids to be delted
self.tree_single_value_shape = np.shape(self.values[0]) # imitate the shape of 'self.values[0]'
self.unique_feature = set() # features that actually appear in this tree (unlike self.feature, which maps each node id to the feature it splits on)
if self.feature[rootNode] == -2:
assert False, "The root of a tree is a leaf, please verify"
for feature in all_features:
# construct feature tree for all features
queue = [rootNode]
if feature == self.feature[rootNode]:
# if the root node of original tree is of this feature, there is no need for a dummy
queue = []
self.nodeid2TreeNode[rootNode] = TreeNode(self.threshold[rootNode])
self.feature2nodes[feature] = self.nodeid2TreeNode[rootNode]
result_list = []
left_node = self.children_left[rootNode]
self.node_traverse(result_list, left_node, feature) # get all non-leaf nodes of this feature in the left sub-tree
self.nodeid2TreeNode[rootNode].children_left = result_list
result_list = []
right_node = self.children_right[rootNode]
self.node_traverse(result_list, right_node, feature) # get all non-leaf nodes of this feature in the right sub-tree
self.nodeid2TreeNode[rootNode].children_right = result_list
result_set = set()
self.node_traverse_leaf(result_set, left_node) # get all leaf nodes it can reach in the left sub-tree
self.nodeid2TreeNode[rootNode].node_set[0] = result_set
result_set = set()
self.node_traverse_leaf(result_set, right_node) # get all leaf nodes it can reach in the right sub-tree
self.nodeid2TreeNode[rootNode].node_set[1] = result_set
queue.append(left_node)
queue.append(right_node)
else:
# if the root node of original tree is not of this feature, we need to have a dummy root for this feature tree
self.feature2nodes[feature] = TreeNode(-1, True) # add a dummy root
result_list = []
left_node = self.children_left[rootNode]
self.node_traverse(result_list, left_node, feature) # get all non-leaf nodes of this feature in the left sub-tree
self.feature2nodes[feature].children_left = result_list
result_list = []
right_node = self.children_right[rootNode]
self.node_traverse(result_list, right_node, feature)# get all non-leaf nodes of this feature in the right sub-tree
self.feature2nodes[feature].children_right = result_list
while queue:
current_node = queue.pop(0)
if feature == self.feature[current_node]:
# find a node of given feature
self.nodeid2TreeNode[current_node] = TreeNode(self.threshold[current_node])
result_list = []
left_node = self.children_left[current_node]
self.node_traverse(result_list, left_node, feature) # get all non-leaf nodes of this feature in the left sub-tree
self.nodeid2TreeNode[current_node].children_left = result_list
result_list = []
right_node = self.children_right[current_node]
self.node_traverse(result_list, right_node, feature) # get all non-leaf nodes of this feature in the right sub-tree
self.nodeid2TreeNode[current_node].children_right = result_list
result_set = set()
self.node_traverse_leaf(result_set, left_node)
self.nodeid2TreeNode[current_node].node_set[0] = result_set # get all leaf nodes it can reach in the left sub-tree
result_set = set()
self.node_traverse_leaf(result_set, right_node)
self.nodeid2TreeNode[current_node].node_set[1] = result_set # get all leaf nodes it can reach in the right sub-tree
if self.feature[current_node] != -2:
# if not the leaf
queue.append(self.children_left[current_node])
queue.append(self.children_right[current_node])
for feature in all_features:
threshold_set = set()
queue = [self.feature2nodes[feature]] # get the root in feature tree
while queue:
currentNode = queue.pop(0)
if currentNode.dummy != True:
threshold_set.add(currentNode.threshold)
for node in currentNode.children_left:
queue.append(self.nodeid2TreeNode[node])
for node in currentNode.children_right:
queue.append(self.nodeid2TreeNode[node])
threshold_list = sorted(list(threshold_set)) # rank the list in increasing threshold
self.feature2threshold_list[feature] = threshold_list
self.featureAndthreshold2delete_set[feature] = {}
for feature in self.feature2threshold_list.keys():
l = len(self.feature2threshold_list[feature])
if l == 0:
continue
for i in range(l):
threshold = self.feature2threshold_list[feature][i]
delete_set_equal_or_less = set() # the nodes to be deleted if equal or less than the threshold
queue = [self.feature2nodes[feature]] # the root of feature tree
while queue:
currentTreeNode = queue.pop(0)
if currentTreeNode.dummy == True:
for node in currentTreeNode.children_left:
queue.append(self.nodeid2TreeNode[node])
for node in currentTreeNode.children_right:
queue.append(self.nodeid2TreeNode[node])
else:
if threshold <= currentTreeNode.threshold:
# current value (threshold) is equal or less than threshold for this node, go to the left sub-tree for this node
for node in currentTreeNode.children_left:
queue.append(self.nodeid2TreeNode[node])
delete_set_equal_or_less |= currentTreeNode.node_set[1] # delete all leaf-nodes can be reached in the right sub-tree
else:
for node in currentTreeNode.children_right:
queue.append(self.nodeid2TreeNode[node])
delete_set_equal_or_less |= currentTreeNode.node_set[0]
self.featureAndthreshold2delete_set[feature][threshold] = delete_set_equal_or_less
delete_set_larger = set() # the nodes to be deleted if larger than the threshold
queue = [self.feature2nodes[feature]] # the root of feature tree
while queue:
currentTreeNode = queue.pop(0)
if currentTreeNode.dummy == True:
for node in currentTreeNode.children_left:
queue.append(self.nodeid2TreeNode[node])
for node in currentTreeNode.children_right:
queue.append(self.nodeid2TreeNode[node])
else:
for node in currentTreeNode.children_right:
queue.append(self.nodeid2TreeNode[node])
delete_set_larger |= currentTreeNode.node_set[0]
self.featureAndthreshold2delete_set[feature][np.inf] = delete_set_larger
for feature in self.feature2threshold_list.keys():
if len(self.feature2threshold_list[feature]) > 0:
self.unique_feature.add(feature)
def node_traverse_leaf(self,
result_set,
currentNode):
# get all leaf nodes which can be reached starting from one node
nodeFeature = self.feature[currentNode]
if nodeFeature == -2:
result_set.add(currentNode)
self.total_leaf_id.add(currentNode)
return
self.node_traverse_leaf(result_set, self.children_left[currentNode])
self.node_traverse_leaf(result_set, self.children_right[currentNode])
def node_traverse(self,
result_list,
currentNode,
feature_target):
nodeFeature = self.feature[currentNode]
if nodeFeature == feature_target:
result_list.append(currentNode)
return
if nodeFeature == -2:
return
self.node_traverse(result_list, self.children_left[currentNode], feature_target)
self.node_traverse(result_list, self.children_right[currentNode], feature_target)
class ActiveFairness(object):
def __init__(self,
dataset_train, dataset_test,
clf,
sensitive_features = [],
target_label = []):
'''
dataset_train: training dataset, type: MexicoDataset()
dataset_test: testing dataset, type: MexicoDataset()
clf: trained randomforest classifier
sensitive_features: a list of sensitive features which should be removed when doing prediction
target_label: a list of features whose values are to be predicted
'''
assert len(target_label) == 1, "Error in ActiveFairness: exactly one target_label must be provided"
train = dataset_train.features
complete_data = dataset_train.metadata['previous'][0]
self.feature2columnmap = {}
test = dataset_test.features
feature_name = pd.DataFrame(complete_data.feature_names)
y_column_index = ~(feature_name.isin(sensitive_features + target_label).iloc[:, 0])
y_column_index_inverse = (feature_name.isin(sensitive_features + target_label).iloc[:, 0])
index = 0
for i in range(len(y_column_index_inverse)):
if y_column_index_inverse.iloc[i] == True:
self.feature2columnmap[complete_data.feature_names[i]] = index
index += 1
self.target_label = target_label
self.sensitive_features = sensitive_features
self.dataset_train = dataset_train
self.dataset_test = dataset_test
self.X_tr_sensitiveAtarget = pd.DataFrame(train[:, y_column_index_inverse]) # the dataframe of all samples in training dataset which only keeps the non-sensitive and target features
self.X_tr = pd.DataFrame(train[:, y_column_index])
self.y_tr = pd.DataFrame(self.dataset_train.labels[:, 0]).iloc[:, 0]
self.X_te_sensitiveAtarget = pd.DataFrame(test[:, y_column_index_inverse]) # the dataframe of all samples in testing dataset which only keeps the non-sensitive and target features
self.X_te = pd.DataFrame(test[:, y_column_index])
self.y_te = pd.DataFrame(self.dataset_test.labels[:, 0]).iloc[:, 0]
self.clf = clf
self.trees = []
def fit(self):
# This is a temporary implementation
self.clf = self.clf.fit(self.X_tr, self.y_tr)
self.features_by_importance = self.clf.feature_importances_.argsort()[::-1] # get the importance of features based on trained RF
self.all_features = list(range(self.X_te.shape[1]))
def predict(self, train):
if train == True:
Y_tr_predict = self.clf.predict(self.X_tr)
re_dataset_train = deepcopy(self.dataset_train)
re_dataset_train.labels = Y_tr_predict
return re_dataset_train
else:
Y_te_predict = self.clf.predict(self.X_te)
re_dataset_test = deepcopy(self.dataset_test)
re_dataset_test.labels = Y_te_predict
return re_dataset_test
# choose the appropriate number of features to ask for Group A and B
def choose_appropriate_num_of_feature(self, privilige_feature, privilige_value, unprivilige_value, \
total_budget, feat_method = 'feat-imp', run_on_training = False):
num_of_priviledge = 0
num_of_unpriviledge = 0
dataset = self.X_te_sensitiveAtarget if run_on_training == False else self.X_tr_sensitiveAtarget
featured_dataset = self.X_te if run_on_training == False else self.X_tr
for i in range(len(dataset)):
if dataset.iloc[i, self.feature2columnmap[privilige_feature]] == privilige_value:
# priviledge class
num_of_priviledge += 1
else:
assert dataset.iloc[i, self.feature2columnmap[privilige_feature]] == unprivilige_value, "Value incorrect!"
num_of_unpriviledge += 1
total_num = num_of_priviledge + num_of_unpriviledge
current_num_of_feature_for_priviledge = 0
current_num_of_feature_for_unpriviledge = 0
budget_used = 0
# batch_size = 500
# nr_of_batches = total_num // batch_size + 2
dataset_orig = self.dataset_test if run_on_training == False else self.dataset_train
self.trees = [TreeProcess(value.tree_, self.all_features) for value in self.clf.estimators_]
features_by_importance = self.features_by_importance
last_add_privi = True
result = np.zeros([len(dataset)], dtype = np.float32)
priviledge_index = []
unprivilege_index = []
for i in range(len(dataset)):
if dataset.iloc[i, self.feature2columnmap[privilige_feature]] == privilige_value:
priviledge_index.append(i)
else:
unprivilege_index.append(i)
less_than_pri = np.array(dataset_orig.labels[priviledge_index] <= 0.5, dtype = bool)[:, 0]
less_than_unpri = np.array(dataset_orig.labels[unprivilege_index] <= 0.5, dtype = bool)[:, 0]
previous_answers = [[tree.total_leaf_id.copy() for tree in self.trees] for i in range(len(dataset))]
print("Start the process")
while budget_used < total_budget:
# FP_pri = 0
# TN_pri = 0
# FP_unpri = 0
# TN_unpri = 0
if current_num_of_feature_for_priviledge == 0:
FP_pri = 1
TN_pri = 0
else:
privi_predict_result = np.array(result[priviledge_index] > 0.5, dtype = bool)
FP_pri = np.sum(privi_predict_result * less_than_pri)
TN_pri = np.sum((1 - privi_predict_result) * less_than_pri)
if current_num_of_feature_for_unpriviledge == 0:
FP_unpri = 1
TN_unpri = 0
else:
unprivi_predict_result = np.array(result[unprivilege_index] > 0.5, dtype = bool)
FP_unpri = np.sum(unprivi_predict_result * less_than_unpri)
TN_unpri = np.sum((1 - unprivi_predict_result) * less_than_unpri)
# for i in range(len(dataset)):
# if dataset.iloc[i, self.feature2columnmap[privilige_feature]] == privilige_value:
# # priviledge class
# if dataset_orig.labels[i] <= 0.5:
# # actual negative
# if current_num_of_feature_for_priviledge == 0:
# FP_pri += 1
# else:
# if result[i] > 0.5:
# FP_pri += 1
# else:
# TN_pri += 1
# else:
# if dataset_orig.labels[i] <= 0.5:
# # actual negative
# if current_num_of_feature_for_unpriviledge == 0:
# FP_unpri += 1
# else:
# if result[i] > 0.5:
# FP_unpri += 1
# else:
# TN_unpri += 1
FPR_pri = FP_pri * 1.0 / (FP_pri + TN_pri)
FPR_unpri = FP_unpri * 1.0 / (FP_unpri + TN_unpri)
result[:] = 0
if FPR_pri > FPR_unpri:
current_num_of_feature_for_priviledge += 1
last_add_privi = True
budget_used += (num_of_priviledge* 1.0 / total_num)
else:
current_num_of_feature_for_unpriviledge += 1
last_add_privi = False
budget_used += (num_of_unpriviledge * 1.0 / total_num)
print("budget_used", budget_used)
print("FPR_pri", FPR_pri)
print("FPR_unpri", FPR_unpri)
print("FP_pri", FP_pri)
print("TN_pri", TN_pri)
print("FP_unpri", FP_unpri)
print("TN_unpri", TN_unpri)
features = deepcopy(self.all_features)
for j in range(len(dataset)):
test_example_full = featured_dataset.iloc[j, :].values.astype(float)
if dataset.iloc[j, self.feature2columnmap[privilige_feature]] == privilige_value and last_add_privi == True:
# priviledge class
if feat_method == 'random':
new_feature = random.sample(features,1)[0]
features.remove(new_feature)
elif feat_method == 'feat-imp':
new_feature = features_by_importance[current_num_of_feature_for_priviledge]
elif feat_method == 'ask-town':
assert False, "Error 385, not supported yet"
new_feature = getTheNextBestFeature(self.trees, features, test_example, previous_answers, p_cur, absolutes_on=False)
features.remove(new_feature)
elif feat_method == 'abs-agg':
assert False, "Error 389, not supported yet"
new_feature = getTheNextBestFeature(self.trees, features, test_example, previous_answers, p_cur)
features.remove(new_feature)
else:
raise Exception('mode has not been implemented')
p_dict, p_cur = calcPValuesPerTree(test_example_full, self.trees, previous_answers[j], new_feature)
result[j] = 1 - p_cur # the returned p-value is inverted, so flip it
elif dataset.iloc[j, self.feature2columnmap[privilige_feature]] != privilige_value and last_add_privi == False:
if feat_method == 'random':
new_feature = random.sample(features,1)[0]
features.remove(new_feature)
elif feat_method == 'feat-imp':
new_feature = features_by_importance[current_num_of_feature_for_unpriviledge]
elif feat_method == 'ask-town':
assert False, "Error 385, not supported yet"
new_feature = getTheNextBestFeature(self.trees, features, test_example, previous_answers, p_cur, absolutes_on=False)
features.remove(new_feature)
elif feat_method == 'abs-agg':
assert False, "Error 389, not supported yet"
new_feature = getTheNextBestFeature(self.trees, features, test_example, previous_answers, p_cur)
features.remove(new_feature)
else:
raise Exception('mode has not been implemented')
p_dict, p_cur = calcPValuesPerTree(test_example_full, self.trees, previous_answers[j], new_feature)
result[j] = 1 - p_cur # the returned p-value is inverted, so flip it
return current_num_of_feature_for_priviledge, current_num_of_feature_for_unpriviledge
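# Hedged usage sketch (not part of the original class; the sensitive-attribute name
# and its privileged/unprivileged values below are placeholders):
# model.fit()
# n_pri, n_unpri = model.choose_appropriate_num_of_feature(
#     'sex', privilige_value=1, unprivilige_value=0,
#     total_budget=10, feat_method='feat-imp')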
def run_algo_in_parallel(self, new_feat_mode,
sensitive_name,
privilige_variable_value,
unprivilige_variable_value,
pri_num_feature_fetched,
un_pri_num_feature_fetched,
verbose=1,
plot_any=False,
batch_size=512,
nr_of_batches=100,
save_to_file=True,
run_on_training=False,
save_folder='',
show_no_words = False):
assert (len(save_folder) == 0) or (save_to_file)
X_tr = self.X_tr
y_tr = self.y_tr
if run_on_training:
X_te = self.X_tr
y_te = self.y_tr
X_sensi_te = self.X_tr_sensitiveAtarget
else:
X_te = self.X_te
y_te = self.y_te
X_sensi_te = self.X_te_sensitiveAtarget
clf = self.clf
self.trees = [TreeProcess(value.tree_, self.all_features) for value in clf.estimators_]
all_features = self.all_features
features_by_importance = self.features_by_importance
start_time2 = time.time()
results = []
for ii in range(nr_of_batches):
start = ii*batch_size
end = min(X_te.shape[0], (ii + 1) * batch_size)
if start >= X_te.shape[0]:
break
if not show_no_words:
print('START',start, 'END', end)
results_one = [run_per_test_case(i, X_tr, y_tr, X_te, y_te, X_sensi_te, sensitive_name, privilige_variable_value, \
unprivilige_variable_value, pri_num_feature_fetched, un_pri_num_feature_fetched, self.feature2columnmap, \
verbose, new_feat_mode, clf, start_time2, all_features, features_by_importance, self.trees) for i in np.arange(start,end)]
results.extend(results_one)
l = len(results)
ser_p = [pd.Series(results[i]['p_list'], name=results[i]['index']) for i in range(l)]
df_p = pd.concat(ser_p,axis=1).transpose()
df_p = (1-df_p) # correcting because the p-values come back inverted
ser_qa = [pd.Series(results[i]['qa'], name=results[i]['index']) for i in range(l)]
df_qa = pd.concat(ser_qa, axis=1)
# flu prediction
import os
import pandas as pd
import feather
from utils.fastai.structured import *
from utils.fastai.column_data import *
from sklearn import preprocessing
from sklearn.metrics import classification_report, confusion_matrix
import keras
from keras.layers import Input, Embedding, Dense, Dropout
from keras.models import Model
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras import metrics
pd.set_option('display.width', 250)
data_path = os.environ['DATA_DIR'] + 'epidata_flu/'
def drop_columns(df, cols):
"""drop columns form dataframe"""
df = df.drop(cols, axis=1)
return df
def show_prediction(model, raw_df, epiyear):
"""
compare predictions against actual values for a given epiyear
"""
def proc_df(df, max_n_cat=None, mapper=None):
""" standardizes continuous columns and numericalizes categorical columns
Parameters:
-----------
df: The data frame you wish to process.
max_n_cat: The maximum number of categories to break into dummy values, instead
of integer codes.
mapper: calculates the values used for scaling variables during training time (mean
and standard deviation).
Returns:
--------
x: x is the transformed version of df. x will not have the response variable
and is entirely numeric.
mapper: A DataFrameMapper which stores the mean and standard deviation of the
corresponding continuous variables, which is then used for scaling during test time.
"""
df = df.copy()
mapper = scale_vars(df, mapper)
for n, c in df.items():
numericalize(df, c, n, max_n_cat)
return pd.get_dummies(df, dummy_na=True)
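# Hedged usage sketch for proc_df (the feather path and target column are placeholders,
# not taken from the original script):
# raw = feather.read_dataframe(data_path + 'weekly_flu.feather')
# X_train = proc_df(raw.drop('num_ili', axis=1))
# X_test = proc_df(holdout.drop('num_ili', axis=1), mapper=mapper)  # pass the fitted mapper to reuse training-time scaling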
import pandas as pd
from string import punctuation
import nltk
from IPython.core.display import display
nltk.download('tagsets')
from nltk.data import load
nltk.download('averaged_perceptron_tagger')
from nltk import pos_tag
from nltk import word_tokenize
from collections import Counter
def get_tagsets():
tagdict = load('help/tagsets/upenn_tagset.pickle')
return list(tagdict.keys())
tag_list = get_tagsets()
print(tag_list)
# This method will count occurrence of pos tags in each sentence.
def get_pos_occurrence_freq(data, tag_list):
# Get list of sentences in text_list
text_list = data.text
# create empty dataframe
feature_df = pd.DataFrame(columns=tag_list)
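# Hedged illustration only (the original function is truncated in this file; the lines
# below just show how the imports above are typically combined, per sentence):
# tags = [tag for _, tag in pos_tag(word_tokenize(sentence))]
# tag_counts = Counter(tags)  # e.g. Counter({'NN': 3, 'DT': 2, ...})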
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pymysql
import seaborn as sns
from sklearn.linear_model import Lasso
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform as sp_rand
import pickle
import os
import warnings
currentpath = os.getcwd()
warnings.filterwarnings('ignore')
# DB connection
"""
connToRating = pymysql.connect(host="localhost", user="root", password="<PASSWORD>",
db="rating_data", charset="utf8")
cursor = connToRating.cursor(pymysql.cursors.DictCursor)
query = "SELECT * FROM ratings"
cursor.execute(query)
ratings = pd.read_sql(query, connToRating)
"""
# Loading from the DB takes too long, so use the bundled local data for now
ratings = pd.read_csv('analysisapp/data/ratings.csv')
genome_scores = pd.read_csv('analysisapp/data/genome-scores.csv')
genome_tags = pd.read_csv('analysisapp/data/genome-tags.csv')
my_ratings = pd.read_csv('analysisapp/data/my_ratings_input.csv')
movies = pd.read_csv('analysisapp/data/movies.csv')
with open('analysisapp/data/genres.p', 'rb') as f:
genres = pickle.load(f)
genres = pd.read_pickle('analysisapp/data/genres.p')
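# Hedged sketch of how the imports above are typically wired together (the feature
# matrix X and target y derived from the ratings/genome frames are assumed here,
# not built in this snippet):
# param_dist = {'alpha': sp_rand()}
# search = RandomizedSearchCV(Lasso(), param_distributions=param_dist,
#                             n_iter=20, cv=5, random_state=0)
# search.fit(X, y)
# print(search.best_params_)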
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path to the CSV file containing the time series data values
--outFile: Path to the INI configuration file to be written from the time series data values
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: datetime.datetime.strptime(x, formatString)) # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
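# Hedged usage note (the CSV path is a placeholder): all three methods return the same
# header list; methods 1 and 3 go through pandas, method 2 uses the csv module directly.
# names, name_dtypes = self.getHeaderFromFile(headerFilePath='time-series.csv', method=1)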
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
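# Hedged read-back sketch (section and option names depend on the frame that was
# written above; 'time-series-madness.ini' is the default output file name):
# parser = cF.ConfigParser()
# parser.read('time-series-madness.ini')
# print(parser.sections(), parser.options(parser.sections()[0]))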
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
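# Illustrative note (not part of the original method): for x = [2, 4, 6],
# numpy.average with a weight of 1/3 on each element gives 2/3 + 4/3 + 6/3 = 4.0,
# the same value as a plain mean but computed without an explicit division by n.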
def _calculateStd(self, data):
"""
Calculates the standard deviation in a multiplication method since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(mean)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisFrame[columnName] >= 1)
dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": pandas.StringDtype(),
"bitErrBucketArray44": pandas.StringDtype(),
"bitErrBucketArray45": pandas.StringDtype(),
"bitErrBucketArray46": pandas.StringDtype(),
"bitErrBucketArray47": pandas.StringDtype(),
"bitErrBucketArray48": pandas.StringDtype(),
"bitErrBucketArray49": pandas.StringDtype(),
"bitErrBucketArray50": pandas.StringDtype(),
"bitErrBucketArray51": pandas.StringDtype(),
"bitErrBucketArray52": pandas.StringDtype(),
"bitErrBucketArray53": pandas.StringDtype(),
"bitErrBucketArray54": pandas.StringDtype(),
"bitErrBucketArray55": pandas.StringDtype(),
"bitErrBucketArray56": pandas.StringDtype(),
"bitErrBucketArray57": pandas.StringDtype(),
"bitErrBucketArray58": pandas.StringDtype(),
"bitErrBucketArray59": pandas.StringDtype(),
"bitErrBucketArray60": pandas.StringDtype(),
"bitErrBucketArray61": pandas.StringDtype(),
"bitErrBucketArray62": pandas.StringDtype(),
"bitErrBucketArray63": pandas.StringDtype(),
"bitErrBucketArray64": pandas.StringDtype(),
"bitErrBucketArray65": pandas.StringDtype(),
"bitErrBucketArray66": pandas.StringDtype(),
"bitErrBucketArray67": pandas.StringDtype(),
"bitErrBucketArray68": pandas.StringDtype(),
"bitErrBucketArray69": pandas.StringDtype(),
"bitErrBucketArray70": pandas.StringDtype(),
"bitErrBucketArray71": pandas.StringDtype(),
"bitErrBucketArray72": pandas.StringDtype(),
"bitErrBucketArray73": pandas.StringDtype(),
"bitErrBucketArray74": pandas.StringDtype(),
"bitErrBucketArray75": pandas.StringDtype(),
"bitErrBucketArray76": pandas.StringDtype(),
"bitErrBucketArray77": pandas.StringDtype(),
"bitErrBucketArray78": pandas.StringDtype(),
"bitErrBucketArray79": pandas.StringDtype(),
"bitErrBucketArray80": pandas.StringDtype(),
"mrr_successDistribution1": pandas.StringDtype(),
"mrr_successDistribution2": pandas.StringDtype(),
"mrr_successDistribution3": pandas.StringDtype(),
"mrr_successDistribution4": pandas.StringDtype(),
"mrr_successDistribution5": pandas.StringDtype(),
"mrr_successDistribution6": pandas.StringDtype(),
"mrr_successDistribution7": pandas.StringDtype(),
"mrr_successDistribution8": pandas.StringDtype(),
"mrr_successDistribution9": pandas.StringDtype(),
"mrr_successDistribution10": pandas.StringDtype(),
"mrr_successDistribution11": pandas.StringDtype(),
"mrr_successDistribution12": pandas.StringDtype(),
"mrr_successDistribution13": pandas.StringDtype(),
"mrr_successDistribution14": pandas.StringDtype(),
"mrr_successDistribution15": pandas.StringDtype(),
"mrr_successDistribution16": pandas.StringDtype(),
"mrr_successDistribution17": pandas.StringDtype(),
"mrr_successDistribution18": pandas.StringDtype(),
"mrr_successDistribution19": pandas.StringDtype(),
"mrr_successDistribution20": pandas.StringDtype(),
"mrr_successDistribution21": pandas.StringDtype(),
"mrr_successDistribution22": pandas.StringDtype(),
"mrr_successDistribution23": pandas.StringDtype(),
"mrr_successDistribution24": pandas.StringDtype(),
"mrr_successDistribution25": pandas.StringDtype(),
"mrr_successDistribution26": pandas.StringDtype(),
"mrr_successDistribution27": pandas.StringDtype(),
"mrr_successDistribution28": pandas.StringDtype(),
"mrr_successDistribution29": pandas.StringDtype(),
"mrr_successDistribution30": pandas.StringDtype(),
"mrr_successDistribution31": pandas.StringDtype(),
"mrr_successDistribution32": pandas.StringDtype(),
"mrr_successDistribution33": pandas.StringDtype(),
"mrr_successDistribution34": pandas.StringDtype(),
"mrr_successDistribution35": pandas.StringDtype(),
"mrr_successDistribution36": pandas.StringDtype(),
"mrr_successDistribution37": pandas.StringDtype(),
"mrr_successDistribution38": pandas.StringDtype(),
"mrr_successDistribution39": pandas.StringDtype(),
"mrr_successDistribution40": pandas.StringDtype(),
"mrr_successDistribution41": pandas.StringDtype(),
"mrr_successDistribution42": pandas.StringDtype(),
"mrr_successDistribution43": pandas.StringDtype(),
"mrr_successDistribution44": pandas.StringDtype(),
"mrr_successDistribution45": pandas.StringDtype(),
"mrr_successDistribution46": pandas.StringDtype(),
"mrr_successDistribution47": pandas.StringDtype(),
"mrr_successDistribution48": pandas.StringDtype(),
"mrr_successDistribution49": pandas.StringDtype(),
"mrr_successDistribution50": pandas.StringDtype(),
"mrr_successDistribution51": pandas.StringDtype(),
"mrr_successDistribution52": pandas.StringDtype(),
"mrr_successDistribution53": pandas.StringDtype(),
"mrr_successDistribution54": pandas.StringDtype(),
"mrr_successDistribution55": pandas.StringDtype(),
"mrr_successDistribution56": pandas.StringDtype(),
"mrr_successDistribution57": pandas.StringDtype(),
"mrr_successDistribution58": pandas.StringDtype(),
"mrr_successDistribution59": pandas.StringDtype(),
"mrr_successDistribution60": pandas.StringDtype(),
"mrr_successDistribution61": pandas.StringDtype(),
"mrr_successDistribution62": pandas.StringDtype(),
"mrr_successDistribution63": pandas.StringDtype(),
"mrr_successDistribution64": pandas.StringDtype(),
"blDowngradeCount": pandas.StringDtype(),
"snapReads": pandas.StringDtype(),
"pliCapTestTime": pandas.StringDtype(),
"currentTimeToFreeSpaceRecovery": pandas.StringDtype(),
"worstTimeToFreeSpaceRecovery": pandas.StringDtype(),
"rspnandReads": pandas.StringDtype(),
"cachednandReads": pandas.StringDtype(),
"spnandReads": pandas.StringDtype(),
"dpnandReads": pandas.StringDtype(),
"qpnandReads": pandas.StringDtype(),
"verifynandReads": pandas.StringDtype(),
"softnandReads": pandas.StringDtype(),
"spnandWrites": pandas.StringDtype(),
"dpnandWrites": pandas.StringDtype(),
"qpnandWrites": pandas.StringDtype(),
"opnandWrites": pandas.StringDtype(),
"xpnandWrites": pandas.StringDtype(),
"unalignedHostWriteCmd": pandas.StringDtype(),
"randomReadCmd": pandas.StringDtype(),
"randomWriteCmd": pandas.StringDtype(),
"secVenCmdCount": | pandas.StringDtype() | pandas.StringDtype |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 10:06:16 2018
@author: <NAME>
"""
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import autocorrelation_plot
from pandas import ExcelWriter
import numpy as np
import scipy as sp
###this file is used to calculate local flows for control points##
#reading in cp historical data#
#data starts 10/01/1952
cfs_to_cms = 0.0283168
ALBin = pd.read_excel('CP_historical/ALBANY.xlsx',usecols=[2,3],skiprows=1736+92,skipfooter=3379+274,header=None)
SALin = pd.read_excel('CP_historical/SALEM.xlsx',usecols=[2,3],skiprows=1736+92,skipfooter=3379+274,header=None)
HARin = pd.read_excel('CP_historical/HARRISBURG.xlsx',usecols=[2,3],skiprows=1736+92,skipfooter=3379+274,header=None)
HARshift = pd.read_excel('CP_historical/HARRISBURG.xlsx',usecols=[2,3],skiprows=1735+92,skipfooter=3380+274,header=None) #shifted one day ahead
VIDin = pd.read_excel('CP_historical/VIDA.xlsx',usecols=[2,3],skiprows=1736+92,skipfooter=3379+274,header=None)
JEFin = pd.read_excel('CP_historical/JEFFERSON.xlsx',usecols=[2,3],skiprows=1736+92,skipfooter=3379+274,header=None)
MEHin = pd.read_excel('CP_historical/MEHAMA.xlsx',usecols=[2,3],skiprows=1736+92,skipfooter=3379+274,header=None)
MONin = pd.read_excel('CP_historical/MONROE.xlsx',usecols=[2,3],skiprows=1736+92,skipfooter=3379+274,header=None)
MONshift = pd.read_excel('CP_historical/MONROE.xlsx',usecols=[2,3],skiprows=1735+92,skipfooter=3380+274,header=None) #shifted one day ahead
WATin = pd.read_excel('CP_historical/WATERLOO.xlsx',usecols=[2,3],skiprows=1736+92,skipfooter=3379+274,header=None)
JASin = pd.read_excel('CP_historical/JASPER.xlsx',usecols=[2,3],skiprows=93,skipfooter=3379+274,header=None)
GOSin = pd.read_excel('CP_historical/GOSHEN.xlsx',usecols=[2,3],skiprows=732+91,skipfooter=3379+274,header=None)
# read in historical reservoir outflow BPA data
top=8951
bottom=274
BLUout = pd.read_excel('BLU5H_daily.xls',skiprows=top, skipfooter=bottom,header=None) #only using data from 2005
BCLout = pd.read_excel('BCL5H_daily.xls',skiprows=top, skipfooter=bottom,header=None)#only using data from 2005
CGRout = pd.read_excel('CGR5H_daily.xls',skiprows=top, skipfooter=bottom,header=None)
DETout = pd.read_excel('DET5H_daily.xls',skiprows=top, skipfooter=bottom,header=None)
DEXout = pd.read_excel('LOP5H_daily.xls',skiprows=top, skipfooter=bottom,header=None)
DORout = pd.read_excel('DOR5H_daily.xls',skiprows=top, skipfooter=bottom,header=None)
FALout = pd.read_excel('FAL5H_daily.xls',skiprows=top, skipfooter=bottom,header=None)
FOSout = pd.read_excel('FOS5H_daily.xls',skiprows=top, skipfooter=bottom,header=None)
FRNout = pd.read_excel('FRN5H_daily.xls',skiprows=top, skipfooter=bottom,header=None)
GPRout = pd.read_excel('GPR5H_daily.xls',skiprows=top, skipfooter=bottom,header=None)
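# Hedged sanity check (not in the original script): confirm the cfs -> cms factor
# used above by converting a round number; 1000 cfs is roughly 28.32 m^3/s.
example_cms = 1000.0 * cfs_to_cms
print('1000 cfs =', round(example_cms, 2), 'cms')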
import matplotlib
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split, TimeSeriesSplit, ParameterGrid, GridSearchCV
from keras.wrappers.scikit_learn import KerasRegressor
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
import pickle
# import data
data = pd.read_csv('../data/MarketData_ClosesOnly.csv', index_col=0)
# fill non-traded bars with previous data
data.ffill(axis=0, inplace=True)
# separate the independent variables from the dependent variable in X and Y
X = data.iloc[:, 0:7]
y = data.iloc[:, 8:9]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25, shuffle=False)
test_index = X_test.index.values.tolist()
scalarX = StandardScaler()
scalary = StandardScaler()
scalarX.fit(X_train)
scalary.fit(y_train)
X_train, X_test = scalarX.transform(X_train), scalarX.transform(X_test)
y_train, y_test = scalary.transform(y_train), scalary.transform(y_test)
def build_regressor():
regressor = Sequential()
regressor.add(Dense(units=512, input_dim=7, activation='relu'))
regressor.add(Dense(units=256, activation='relu'))
regressor.add(Dense(units=128, activation='relu'))
regressor.add(Dense(units=64, activation='relu'))
regressor.add(Dense(units=1))
regressor.compile(loss='mean_squared_error', optimizer='adam', metrics=['mae', 'accuracy'])
return regressor
regressor = KerasRegressor(build_fn=build_regressor)
ann_params = {'batch_size': [128],
'nb_epoch': [500]
}
gsc = GridSearchCV(regressor, param_grid=ann_params,
cv=TimeSeriesSplit(n_splits=10).split(X_train), verbose=10, n_jobs=-1, refit=True)
gsc.fit(X_train, y_train)
gsc_dataframe = pd.DataFrame(gsc.cv_results_)
y_pred = gsc.predict(X_test)
y_pred = y_pred.reshape(-1, 1)
y_pred = scalary.inverse_transform(y_pred)
y_test = scalary.inverse_transform(y_test)
mae = round(metrics.mean_absolute_error(y_test, y_pred), 2)
mse = round(metrics.mean_squared_error(y_test, y_pred), 2)
y_df = pd.DataFrame(index=pd.to_datetime(test_index))
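# Hedged sketch of how the frame above is typically used (the column names are
# assumptions, not from the original script):
# y_df['actual'] = y_test.ravel()
# y_df['predicted'] = y_pred.ravel()
# y_df.plot(title='ANN forecast: MAE={}, MSE={}'.format(mae, mse))
# plt.show()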
import os
import warnings
import numpy as np
import pandas as pd
from gisutils import df2shp, project
from shapely.geometry import Point
from mfsetup.fileio import append_csv, check_source_files
from mfsetup.grid import get_ij
from mfsetup.sourcedata import TransientTabularSourceData
from mfsetup.tmr import Tmr
from mfsetup.wateruse import get_mean_pumping_rates, resample_pumping_rates
def setup_wel_data(model, for_external_files=True):
"""Performs the part of well package setup that is independent of
MODFLOW version. Returns a DataFrame with the information
needed to set up stress_period_data.
"""
# default options for distributing fluxes vertically
vfd_defaults = {'across_layers': False,
'distribute_by': 'transmissivity',
'screen_top_col': 'screen_top',
'screen_botm_col': 'screen_botm',
'minimum_layer_thickness': model.cfg['wel'].get('minimum_layer_thickness', 2.)
}
# master dataframe for stress period data
columns = ['per', 'k', 'i', 'j', 'q', 'boundname']
df = pd.DataFrame(columns=columns)
# check for source data
datasets = model.cfg['wel'].get('source_data')
# delete the dropped wells file if it exists, to avoid confusion
dropped_wells_file = model.cfg['wel']['output_files']['dropped_wells_file'].format(model.name)
if os.path.exists(dropped_wells_file):
os.remove(dropped_wells_file)
# get well package input from source (parent) model in lieu of source data
# todo: fetching correct well package from mf6 parent model
if datasets is None and model.cfg['parent'].get('default_source_data') \
and hasattr(model.parent, 'wel'):
# get well stress period data from mfnwt or mf6 model
parent = model.parent
spd = get_package_stress_period_data(parent, package_name='wel')
# map the parent stress period data to inset stress periods
periods = spd.groupby('per')
dfs = []
for inset_per, parent_per in model.parent_stress_periods.items():
if parent_per in periods.groups:
period = periods.get_group(parent_per)
if len(dfs) > 0 and period.drop('per', axis=1).equals(dfs[-1].drop('per', axis=1)):
continue
else:
dfs.append(period)
spd = pd.concat(dfs)
parent_well_i = spd.i.copy()
parent_well_j = spd.j.copy()
parent_well_k = spd.k.copy()
# set boundnames based on well locations in parent model
parent_name = parent.name
spd['boundname'] = ['{}_({},{},{})'.format(parent_name, pk, pi, pj)
for pk, pi, pj in zip(parent_well_k, parent_well_i, parent_well_j)]
parent_well_x = parent.modelgrid.xcellcenters[parent_well_i, parent_well_j]
parent_well_y = parent.modelgrid.ycellcenters[parent_well_i, parent_well_j]
coords = project((parent_well_x, parent_well_y),
model.modelgrid.proj_str,
parent.modelgrid.proj_str)
geoms = [Point(x, y) for x, y in zip(*coords)]
bounds = model.modelgrid.bbox
within = [g.within(bounds) for g in geoms]
i, j = get_ij(model.modelgrid,
parent_well_x[within],
parent_well_y[within])
spd = spd.loc[within].copy()
spd['i'] = i
spd['j'] = j
df = df.append(spd)
# read source data and map onto model space and time discretization
# multiple types of source data can be submitted
elif datasets is not None:
for k, v in datasets.items():
# determine the format
if 'csvfile' in k.lower(): # generic csv
# read csv file and aggregate flow rates to model stress periods
# sum well fluxes co-located in a cell
sd = TransientTabularSourceData.from_config(v,
resolve_duplicates_with='sum',
dest_model=model)
csvdata = sd.get_data()
csvdata.rename(columns={v['data_column']: 'q',
v['id_column']: 'boundname'}, inplace=True)
if 'k' not in csvdata.columns:
if model.nlay > 1:
vfd = vfd_defaults.copy()
vfd.update(v.get('vertical_flux_distribution', {}))
csvdata = assign_layers_from_screen_top_botm(csvdata,
model,
**vfd)
else:
csvdata['k'] = 0
df = df.append(csvdata[columns])
elif k.lower() == 'wells': # generic dict
added_wells = {k: v for k, v in v.items() if v is not None}
if len(added_wells) > 0:
aw = pd.DataFrame(added_wells).T
aw['boundname'] = aw.index
else:
aw = None
if aw is not None:
if 'x' in aw.columns and 'y' in aw.columns:
aw['i'], aw['j'] = get_ij(model.modelgrid,
aw['x'].values,
aw['y'].values)
aw['per'] = aw['per'].astype(int)
aw['k'] = aw['k'].astype(int)
df = df.append(aw)
elif k.lower() == 'wdnr_dataset': # custom input format for WI DNR
# Get steady-state pumping rates
check_source_files([v['water_use'],
v['water_use_points']])
# fill out period stats
period_stats = v['period_stats']
if isinstance(period_stats, str):
period_stats = {kper: period_stats for kper in range(model.nper)}
# separate out stress periods with period mean statistics vs.
# those to be resampled based on start/end dates
resampled_periods = {k: v for k, v in period_stats.items()
if v == 'resample'}
periods_with_dataset_means = {k: v for k, v in period_stats.items()
if k not in resampled_periods}
if len(periods_with_dataset_means) > 0:
wu_means = get_mean_pumping_rates(v['water_use'],
v['water_use_points'],
period_stats=periods_with_dataset_means,
drop_ids=v.get('drop_ids'),
model=model)
df = df.append(wu_means)
if len(resampled_periods) > 0:
wu_resampled = resample_pumping_rates(v['water_use'],
v['water_use_points'],
drop_ids=v.get('drop_ids'),
exclude_steady_state=True,
model=model)
df = df.append(wu_resampled)
# boundary fluxes from parent model
if model.perimeter_bc_type == 'flux':
assert model.parent is not None, "need parent model for TMR cut"
# boundary fluxes
kstpkper = [(0, 0)]
tmr = Tmr(model.parent, model)
# parent periods to copy over
kstpkper = [(0, per) for per in model.cfg['model']['parent_stress_periods']]
bfluxes = tmr.get_inset_boundary_fluxes(kstpkper=kstpkper)
bfluxes['boundname'] = 'boundary_flux'
df = df.append(bfluxes)
for col in ['per', 'k', 'i', 'j']:
df[col] = df[col].astype(int)
# drop any k, i, j locations that are inactive
if model.version == 'mf6':
inactive = model.dis.idomain.array[df.k.values,
df.i.values,
df.j.values] < 1
else:
inactive = model.bas6.ibound.array[df.k.values,
df.i.values,
df.j.values] < 1
# record dropped wells in csv file
# (which might contain wells dropped by other routines)
if np.any(inactive):
#inactive_i, inactive_j = df.loc[inactive, 'i'].values, df.loc[inactive, 'j'].values
dropped = df.loc[inactive].copy()
dropped = dropped.groupby(['k', 'i', 'j']).first().reset_index()
dropped['reason'] = 'in inactive cell'
dropped['routine'] = __name__ + '.setup_wel_data'
append_csv(dropped_wells_file, dropped, index=False, float_format='%g') # append to existing file if it exists
df = df.loc[~inactive].copy()
copy_fluxes_to_subsequent_periods = False
if copy_fluxes_to_subsequent_periods and len(df) > 0:
df = copy_fluxes_to_subsequent_periods(df)
wel_lookup_file = model.cfg['wel']['output_files']['lookup_file'].format(model.name)
wel_lookup_file = os.path.join(model._tables_path, os.path.split(wel_lookup_file)[1])
model.cfg['wel']['output_files']['lookup_file'] = wel_lookup_file
# verify that all wells have a boundname
if df.boundname.isna().any():
no_name = df.boundname.isna()
k, i, j = df.loc[no_name, ['k', 'i', 'j']].T.values
names = ['({},{},{})'.format(k, i, j) for k, i, j in zip(k, i, j)]
df.loc[no_name, 'boundname'] = names
assert not df.boundname.isna().any()
# save a lookup file with well site numbers/categories
df.sort_values(by=['boundname', 'per'], inplace=True)
df[['per', 'k', 'i', 'j', 'q', 'boundname']].to_csv(wel_lookup_file, index=False)
# convert to one-based and comment out header if df will be written straight to external file
if for_external_files:
df.rename(columns={'k': '#k'}, inplace=True)
df['#k'] += 1
df['i'] += 1
df['j'] += 1
return df
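# Hedged usage sketch (assumes `m` is an already-configured mfsetup model instance
# with a 'wel' source_data block; model construction is not shown here):
# spd = setup_wel_data(m, for_external_files=False)
# spd.groupby('per')['q'].sum()  # quick check of total simulated pumping per stress period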
def assign_layers_from_screen_top_botm(data, model,
flux_col='q',
screen_top_col='screen_top',
screen_botm_col='screen_botm',
label_col='site_no',
across_layers=False,
distribute_by='transmissivity',
minimum_layer_thickness=2.):
"""Assign model layers to pumping flux data based on
open interval. Fluxes are applied to each layer proportional
to the fraction of open interval in that layer.
Parameters
----------
data : dataframe of well info
Must have i, j or x, y locations
model : mfsetup.MF6model or mfsetup.MFnwtModel instance
Must have dis, and optionally, attached MFsetupGrid instance
flux_col : column in data with well fluxes
screen_top_col : column in data with screen top elevations
screen_botm_col : column in data with screen bottom elevations
label_col : column with well names (optional; default site_no)
across_layers : bool
True to distribute fluxes to multiple layers intersected by open interval
distribute_by : str ('thickness' or 'transmissivity')
Distribute fluxes to layers based on thickness or transmissivity of
intersected open intervals.
Returns
-------
data : dataframe of well info, modified so that each row represents
pumping in a single model layer (with fluxes modified proportional
to the amount of open interval in that layer).
"""
# inactive cells in either MODFLOW version
if model.version == 'mf6':
idomain = model.idomain
else:
idomain = model.bas6.ibound.array
# 'boundname' column is used by wel setup for identifying wells
if label_col in data.columns:
data['boundname'] = data[label_col]
if across_layers:
raise NotImplementedError('Distributing fluxes to multiple layers')
else:
if distribute_by in {'thickness', 'transmissivity'}:
i, j, x, y, screen_botm, screen_top = None, None, None, None, None, None
if 'i' in data.columns and 'j' in data.columns:
i, j = data['i'].values, data['j'].values
elif 'x' in data.columns and 'y' in data.columns:
raise NotImplementedError('Assigning well layers with just x, y')
x, y = data['x'].values, data['y'].values
if screen_top_col in data.columns:
screen_top = data[screen_top_col].values
if screen_botm_col in data.columns:
screen_botm = data[screen_botm_col].values
# get starting heads if available
no_strt_msg = (f'Well setup: distribute_by: {distribute_by} selected '
'but model has no {} package for computing sat. '
'thickness.\nUsing full layer thickness.')
strt3D = None
if model.version == 'mf6':
strt_package = 'IC'
else:
strt_package = 'BAS6'
if strt_package not in model.get_package_list():
warnings.warn(no_strt_msg.format(strt_package), UserWarning)
strt2D = None
strt3D = None
else:
strt = getattr(getattr(model, strt_package.lower()), 'strt')
strt3D = strt.array
strt2D = strt3D[:, i, j]
thicknesses = get_open_interval_thickness(model,
heads=strt2D,
i=i, j=j, x=x, y=y,
screen_top=screen_top,
screen_botm=screen_botm)
hk = np.ones_like(thicknesses)
if distribute_by == 'transmissivity':
no_k_msg = ('Well setup: distribute_by: transmissivity selected '
'but model has no {} package.\nFalling back to'
'distributing wells by layer thickness.')
if model.version == 'mf6':
hk_package = 'NPF'
hk_var = 'k'
elif model.version == 'mfnwt':
hk_package = 'UPW'
hk_var = 'hk'
else:
hk_package = 'LPF'
hk_var = 'hk'
if hk_package not in model.get_package_list():
warnings.warn(no_k_msg.format(hk_package), UserWarning)
hk = np.ones_like(thicknesses)
else:
hk = getattr(getattr(model, hk_package.lower()), hk_var)
hk = hk.array[:, i, j]
# for each i, j location with a well,
# get the layer with highest transmissivity in the open interval
# if distribute_by == 'thickness' or no hk array,
# T == thicknesses
# round to avoid erratic floating point behavior
# for (nearly) equal quantities
T = np.round(thicknesses * hk, 2)
# to get the deepest occurrence of a max value
# (argmax will result in the first, or shallowest)
# take the argmax on the reversed view of the array
# data['k'] = np.argmax(T, axis=0)
T_r = T[::-1]
data['k'] = len(T_r) - np.argmax(T_r, axis=0) - 1
# get thicknesses for all layers
# (including portions of layers outside open interval)
all_layers = np.zeros((model.nlay + 1, model.nrow, model.ncol))
all_layers[0] = model.dis.top.array
all_layers[1:] = model.dis.botm.array
all_layer_thicknesses = np.abs(np.diff(all_layers, axis=0))
layer_thicknesses = -np.diff(all_layers[:, i, j], axis=0)
# only include thicknesses for valid layers
# reset thicknesses to sat. thickness
if strt3D is not None:
sat_thickness = strt3D - model.dis.botm.array
# cells where the head is above the layer top
no_unsat = sat_thickness > all_layer_thicknesses
sat_thickness[no_unsat] = all_layer_thicknesses[no_unsat]
# cells where the head is below the cell bottom
sat_thickness[sat_thickness < 0] = 0
layer_thicknesses = sat_thickness[:, i, j]
# set inactive cells to 0 thickness for the purpose or relocating wells
layer_thicknesses[idomain[:, i, j] < 1] = 0
data['idomain'] = idomain[data['k'], i, j]
data['laythick'] = layer_thicknesses[data['k'].values,
list(range(layer_thicknesses.shape[1]))]
# flag layers that are too thin or inactive
inactive = idomain[data.k, data.i, data.j] < 1
invalid_open_interval = (data['laythick'] < minimum_layer_thickness) | inactive
if any(invalid_open_interval):
outfile = model.cfg['wel']['output_files']['dropped_wells_file'].format(model.name)
# move wells that are still in a thin layer to the thickest active layer
data['orig_layer'] = data['k']
# get T for all layers
T_all_layers = np.round(layer_thicknesses * hk, 2)
                # to get the deepest occurrence of a max value
                # (argmax would return the first, i.e. shallowest, occurrence)
                # take the argmax on a reversed view of the array
# Tmax_layer = np.argmax(T_all_layers, axis=0)
T_all_layers_r = T_all_layers[::-1]
Tmax_layer = len(T_all_layers_r) - np.argmax(T_all_layers_r, axis=0) - 1
data.loc[invalid_open_interval, 'k'] = Tmax_layer[invalid_open_interval]
data['laythick'] = layer_thicknesses[data['k'].values,
list(range(layer_thicknesses.shape[1]))]
data['idomain'] = idomain[data['k'], i, j]
# record which wells were moved or dropped, and why
bad_wells = data.loc[invalid_open_interval].copy()
bad_wells['category'] = 'moved'
bad_wells['reason'] = 'longest open interval thickness < {} {} minimum'.format(minimum_layer_thickness,
model.length_units)
bad_wells['routine'] = __name__ + '.assign_layers_from_screen_top_botm'
msg = ('Warning: {} of {} wells in layers less than '
'specified minimum thickness of {} {}\n'
'were moved to the thickest layer at their i, j locations.\n'.format(invalid_open_interval.sum(),
len(data),
minimum_layer_thickness,
model.length_units))
still_below_minimum = bad_wells['laythick'] < minimum_layer_thickness
bad_wells.loc[still_below_minimum, 'category'] = 'dropped'
bad_wells.loc[still_below_minimum, 'reason'] = 'no layer above minimum thickness of {} {}'.format(minimum_layer_thickness,
model.length_units)
n_below = np.sum(still_below_minimum)
if n_below > 0:
msg += ('Out of these, {} of {} total wells remaining in layers less than '
'specified minimum thickness of {} {}'
''.format(n_below,
len(data),
minimum_layer_thickness,
model.length_units))
if flux_col in data.columns:
pct_flux_below = 100 * bad_wells.loc[still_below_minimum, flux_col].sum()/data[flux_col].sum()
                        msg += ', \nrepresenting {:.2f} % of total flux,'.format(pct_flux_below)
msg += '\nwere dropped. See {} for details.'.format(outfile)
print(msg)
# write shapefile and CSV output for wells that were dropped
cols = ['k', 'i', 'j', 'boundname',
'category', 'laythick', 'idomain', 'reason', 'routine', 'x', 'y']
if flux_col in data.columns:
cols.insert(3, flux_col)
flux_below = bad_wells.groupby(['k', 'i', 'j']).first().reset_index()[cols]
append_csv(outfile, flux_below, index=False, float_format='%g')
if 'x' in flux_below.columns and 'y' in flux_below.columns:
flux_below['geometry'] = [Point(xi, yi) for xi, yi in zip(flux_below.x, flux_below.y)]
df2shp(flux_below, outfile[:-4] + '.shp', epsg=model.modelgrid.epsg)
# cull the wells that are still below the min. layer thickness
data = data.loc[data['laythick'] > minimum_layer_thickness].copy()
else:
raise ValueError('Unrecognized argument for distribute_by: {}'.format(distribute_by))
return data
def get_open_interval_thickness(m,
heads=None,
i=None, j=None, x=None, y=None,
screen_top=None, screen_botm=None, nodata=-999):
"""
Gets the thicknesses of each model layer at specified locations and
open intervals. If heads are supplied, a saturated thickness is determined
for each row, column or x, y location; otherwise, total layer thickness is used.
Returned thicknesses are limited to open intervals (screen_top, screen_botm)
if included, otherwise the layer tops and bottoms and (optionally) the water table
are used.
Parameters
----------
m : mfsetup.MF6model or mfsetup.MFnwtModel instance
Must have dis, and optionally, attached MFsetupGrid instance
heads : 2D array OR 3D array (optional)
numpy array of shape nlay by n locations (2D) OR complete heads array
of the model for one time (3D).
i : 1D array-like of ints, of length n locations
zero-based row indices (optional; alternately specify x, y)
j : 1D array-like of ints, of length n locations
zero-based column indices (optional; alternately specify x, y)
x : 1D array-like of floats, of length n locations
x locations in real world coordinates (optional)
y : 1D array-like of floats, of length n locations
y locations in real world coordinates (optional)
screen_top : 1D array-like of floats, of length n locations
open interval tops (optional; default is model top)
screen_botm : 1D array-like of floats, of length n locations
open interval bottoms (optional; default is model bottom)
nodata : numeric
optional; locations where heads=nodata will be assigned T=0
Returns
-------
T : 2D array of same shape as heads (nlay x n locations)
Transmissivities in each layer at each location
"""
if i is not None and j is not None:
pass
elif x is not None and y is not None:
# get row, col for observation locations
i, j = get_ij(m.modelgrid, x, y)
else:
raise ValueError('Must specify row, column or x, y locations.')
botm = m.dis.botm.array[:, i, j]
if heads is None:
# use model top elevations; expand to nlay x n locations
heads = np.repeat(m.dis.top.array[np.newaxis, i, j], m.nlay, axis=0)
if heads.shape == (m.nlay, m.nrow, m.ncol):
heads = heads[:, i, j]
msg = 'Shape of heads array must be nlay x n locations'
assert heads.shape == botm.shape, msg
# set open interval tops/bottoms to model top/bottom if None
if screen_top is None:
screen_top = m.dis.top.array[i, j]
if screen_botm is None:
screen_botm = m.dis.botm.array[-1, i, j]
# make an array of layer tops
tops = np.empty_like(botm, dtype=float)
tops[0, :] = m.dis.top.array[i, j]
tops[1:, :] = botm[:-1]
# expand top and bottom arrays to be same shape as botm, thickness, etc.
# (so we have an open interval value for each layer)
sctoparr = np.zeros(botm.shape)
sctoparr[:] = screen_top
scbotarr = np.zeros(botm.shape)
scbotarr[:] = screen_botm
# start with layer tops
# set tops above heads to heads
# set tops above screen top to screen top
# (we only care about the saturated open interval)
openinvtop = tops.copy()
openinvtop[openinvtop > heads] = heads[openinvtop > heads]
openinvtop[openinvtop > sctoparr] = sctoparr[openinvtop > screen_top]
# start with layer bottoms
# set bottoms below screened interval to screened interval bottom
# set screen bottoms below bottoms to layer bottoms
openinvbotm = botm.copy()
openinvbotm[openinvbotm < scbotarr] = scbotarr[openinvbotm < screen_botm]
openinvbotm[scbotarr < botm] = botm[scbotarr < botm]
# compute thickness of open interval in each layer
thick = openinvtop - openinvbotm
# assign open intervals above or below model to closest cell in column
not_in_layer = np.sum(thick < 0, axis=0)
not_in_any_layer = not_in_layer == thick.shape[0]
for i, n in enumerate(not_in_any_layer):
if n:
closest = np.argmax(thick[:, i])
thick[closest, i] = 1.
thick[thick < 0] = 0
thick[heads == nodata] = 0 # exclude nodata cells
return thick
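# Minimal usage sketch for get_open_interval_thickness (illustrative values;
# `m` is assumed to be an attached mfsetup/flopy model with a DIS package):
#   thick = get_open_interval_thickness(m,
#                                       i=np.array([10, 20]),
#                                       j=np.array([5, 6]),
#                                       screen_top=np.array([298., 310.]),
#                                       screen_botm=np.array([250., 260.]))
#   # thick has shape (m.nlay, 2): the open-interval thickness in each layer
#   # at each of the two well locations.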
def copy_fluxes_to_subsequent_periods(df):
"""Copy fluxes to subsequent stress periods as necessary
so that fluxes aren't unintentionally shut off;
for example if water use is only specified for period 0,
but the added well pumps in period 1, copy water use
fluxes to period 1. This goes against the paradigm of
MODFLOW 6, where wells not specified in a subsequent stress period
are shut off.
"""
last_specified_per = int(df.per.max())
copied_fluxes = [df]
for i in range(last_specified_per):
        # only copy the fluxes of a given stress period once,
        # then evaluate the copied fluxes (with new stress periods) and copy those once
        # after specified per - 1, all non-zero fluxes should be propagated
        # to the last stress period
# copy non-zero fluxes that are not already in subsequent stress periods
if i < len(copied_fluxes):
in_subsequent_periods = copied_fluxes[i].boundname.duplicated(keep=False)
# (copied_fluxes[i].per < last_specified_per) & \
tocopy = (copied_fluxes[i].flux != 0) & \
~in_subsequent_periods
if np.any(tocopy):
copied = copied_fluxes[i].loc[tocopy].copy()
# make sure that wells to be copied aren't in subsequent stress periods
                # use .values so membership is tested against boundnames, not the Series index
                duplicated = np.array([r.boundname in df.loc[df.per > i, 'boundname'].values
                                       for idx, r in copied.iterrows()])
copied = copied.loc[~duplicated]
copied['per'] += 1
copied_fluxes.append(copied)
df = pd.concat(copied_fluxes, axis=0)
return df
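# Illustrative example (assumed column layout): given
#   df = pd.DataFrame({'boundname': ['well1', 'well2'],
#                      'per':       [0,       2],
#                      'flux':      [-100.,   -50.]})
# well1's flux would be copied forward to stress periods 1 and 2 so that it is
# not silently shut off after period 0.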
def get_package_stress_period_data(model, package_name, skip_packages=None):
wel_packages = [p for p in model.get_package_list() if package_name in p.lower()]
if skip_packages is not None:
wel_packages = [p for p in wel_packages if p not in skip_packages]
dfs = []
for packagename in wel_packages:
package = model.get_package(packagename)
stress_period_data = package.stress_period_data
for kper, spd in stress_period_data.data.items():
spd = pd.DataFrame(spd)
spd['per'] = kper
dfs.append(spd)
df = | pd.concat(dfs) | pandas.concat |
# Preprocessing
import os, matplotlib
if 'DISPLAY' not in os.environ:
matplotlib.use('Pdf')
import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.max_rows', 50)
import numpy as np
import xgboost as xgb
import xgbfir
import pdb
import time
np.random.seed(1337)
def client_anaylsis():
"""
    The idea here is to unify the client IDs of several different customers into broader categories.
"""
# clean duplicate spaces in client names
client_df = pd.read_csv("../data/cliente_tabla.csv.zip", compression="zip")
client_df["NombreCliente"] = client_df["NombreCliente"].str.lower()
client_df["NombreCliente"] = client_df["NombreCliente"].apply(lambda x: " ".join(x.split()))
client_df = client_df.drop_duplicates(subset="Cliente_ID")
special_list = ["^(yepas)\s.*", "^(oxxo)\s.*", "^(bodega\scomercial)\s.*", "^(bodega\saurrera)\s.*", "^(bodega)\s.*",
"^(woolwort|woolworth)\s.*", "^(zona\sexpress)\s.*",
"^(zacatecana)\s.*", "^(yza)\s.*",
"^(yanet)\s.*", "^(yak)\s.*",
"^(wings)\s.*", "^(wendy)\s.*", "^(walmart\ssuper)\s?.*", "^(waldos)\s.*",
"^(wal\smart)\s.*", "^(vulcanizadora)\s.*", "^(viveres\sy\sservicios)\s.*",
"^(vips)\s.*", "^(vinos\sy\slicores)\s.*", "^(tienda\ssuper\sprecio)\s.*",
"^(vinos\sy\sabarrotes)\s.*", "^(vinateria)\s.*", "^(video\sjuegos)\s.*", "^(universidad)\s.*",
"^(tiendas\stres\sb)\s.*", "^(toks)\s.*","^(tkt\ssix)\s.*",
"^(torteria)\s.*", "^(tortas)\s.*", "^(super\sbara)\s.*",
"^(tiendas\sde\ssuper\sprecio)\s.*", "^(ultramarinos)\s.*", "^(tortilleria)\s.*",
"^(tienda\sde\sservicio)\s.*", "^(super\sx)\s.*", "^(super\swillys)\s.*",
"^(super\ssanchez)\s.*", "^(super\sneto)\s.*", "^(super\skompras)\s.*",
"^(super\skiosco)\s.*", "^(super\sfarmacia)\s.*", "^(super\scarnes)\s.*",
"^(super\scarniceria)\s.*", "^(soriana)\s.*", "^(super\scenter)\s.*",
"^(solo\sun\sprecio)\s.*", "^(super\scity)\s.*", "^(super\sg)\s.*", "^(super\smercado)\s.*",
"^(sdn)\s.*", "^(sams\sclub)\s.*", "^(papeleria)\s.*", "^(multicinemas)\s.*",
"^(mz)\s.*", "^(motel)\s.*", "^(minisuper)\s.*", "^(mini\stienda)\s.*",
"^(mini\ssuper)\s.*", "^(mini\smarket)\s.*", "^(mini\sabarrotes)\s.*", "^(mi\sbodega)\s.*",
"^(merza|merzapack)\s.*", "^(mercado\ssoriana)\s.*", "^(mega\scomercial)\s.*",
"^(mc\sdonalds)\s.*", "^(mb)\s[^ex].*", "^(maquina\sfma)\s.*", "^(ley\sexpress)\s.*",
"^(lavamatica)\s.*", "^(kiosko)\s.*", "^(kesos\sy\skosas)\s.*", "^(issste)\s.*",
"^(hot\sdogs\sy\shamburguesas|)\s.*", "^(hamburguesas\sy\shot\sdogs)\s.*", "(hot\sdog)",
"^(hospital)\s.*", "^(hiper\ssoriana)\s.*", "^(super\sahorros)\s.*", "^(super\sabarrotes)\s.*",
"^(hambuerguesas|hamburguesas|hamburgesas)\s.*", "^(gran\sbodega)\s.*",
"^(gran\sd)\s.*", "^(go\smart)\s.*", "^(gasolinera)\s.*", "^(fundacion)\s.*",
"^(fruteria)\s.*", "^(frutas\sy\sverduras)\s.*", "^(frutas\sy\slegumbres)\s.*",
"^(frutas\sy\sabarrotes)\s.*", "^(fma)\s.*", "^(fiesta\sinn)\s.*", "^(ferreteria)\s.*",
"^(farmacon)\s.*", "^(farmacias)\s.*", "^(farmacia\syza)\s.*",
"^(farmacia\smoderna)\s.*", "^(farmacia\slopez)\s.*",
"^(farmacia\sissste)\s.*", "^(farmacia\sisseg)\s.*", "^(farmacia\sguadalajara)\s.*",
"^(farmacia\sesquivar)\s.*", "^(farmacia\scalderon)\s.*", "^(farmacia\sbenavides)\s.*",
"^(farmacia\sabc)\s.*", "^(farmacia)\s.*", "^(farm\sguadalajara)\s.*",
"^(facultad\sde)\s.*", "^(f\sgdl)\s.*", "^(expendio)\s.*", "^(expendio\sde\span)\s.*",
"^(expendio\sde\shuevo)\s.*", "^(expendio\sbimbo)\s.*", "^(expendedoras\sautomaticas)\s.*",
"^(estic)\s.*", "^(estancia\sinfantil)\s.*", "^(estacionamiento)\s.*", "^(estanquillo)\s.*",
"^(estacion\sde\sservicio)\s.*", "^(establecimientos?)\s.*",
"^(escuela\suniversidad|esc\suniversidad)\s.*", "^(escuela\stelesecundaria|esc\stelesecundaria)\s.*",
"^(escuela\stecnica|esc\stecnica)\s.*",
"^(escuela\ssuperior|esc\ssuperior)\s.*", "^(escuela\ssecundaria\stecnica|esc\ssecundaria\stecnica)\s.*",
"^(escuela\ssecundaria\sgeneral|esc\ssecundaria\sgeneral)\s.*",
"^(escuela\ssecundaria\sfederal|esc\ssecundaria\sfederal)\s.*",
"^(escuela\ssecundaria|esc\ssecundaria)\s.*", "^(escuela\sprimaria|esc\sprimaria)\s.*",
"^(escuela\spreparatoria|esc\spreparatoria)\s.*", "^(escuela\snormal|esc\snormal)\s.*",
"^(escuela\sinstituto|esc\sinstituto)\s.*", "^(esc\sprepa|esc\sprep)\s.*",
"^(escuela\scolegio|esc\scolegio)\s.*", "^(escuela|esc)\s.*", "^(dunosusa)\s.*",
"^(ferreteria)\s.*", "^(dulces)\s.*", "^(dulceria)\s.*", "^(dulce)\s.*", "^(distribuidora)\s.*",
"^(diconsa)\s.*", "^(deposito)\s.*", "^(del\srio)\s.*", "^(cyber)\s.*", "^(cremeria)\s.*",
"^(cosina\seconomica)\s.*", "^(copy).*", "^(consumo|consumos)\s.*","^(conalep)\s.*",
"^(comercializadora)\s.*", "^(comercial\ssuper\salianza)\s.*",
"^(comercial\smexicana)\s.*", "^(comedor)\s.*", "^(colegio\sde\sbachilleres)\s.*",
"^(colegio)\s.*", "^(coffe).*", "^(cocteleria|cockteleria)\s.*", "^(cocina\seconomica)\s.*",
"^(cocina)\s.*", "^(cobaev)\s.*", "^(cobaes)\s.*", "^(cobaeh)\s.*", "^(cobach)\s.*",
"^(club\sde\sgolf)\s.*", "^(club\scampestre)\s.*", "^(city\sclub)\s.*", "^(circulo\sk)\s.*",
"^(cinepolis)\s.*", "^(cinemex)\s.*", "^(cinemas)\s.*", "^(cinemark)\s.*", "^(ciber)\s.*",
"^(church|churchs)\s.*", "^(chilis)\s.*", "^(chiles\sy\ssemillas)\s.*", "^(chiles\ssecos)\s.*",
"^(chedraui)\s.*", "^(cetis)\s.*", "^(cervefrio)\s.*", "^(cervefiesta)\s.*",
"^(cerveceria)\s.*", "^(cervecentro)\s.*", "^(centro\sescolar)\s.*", "^(centro\seducativo)\s.*",
"^(centro\sde\sestudios)\s.*", "^(centro\scomercial)\s.*", "^(central\sde\sautobuses)\s.*",
"^(cecytem)\s.*", "^(cecytec)\s.*", "^(cecyte)\s.*", "^(cbtis)\s.*", "^(cbta)\s.*", "^(cbt)\s.*",
"^(caseta\stelefonica)\s.*", "^(caseta)\s.*", "^(casa\sley)\s.*", "^(casa\shernandez)\s.*",
"^(cartonero\scentral)\s.*", "^(carniceria)\s.*", "^(carne\smart)\s.*", "^(calimax)\s.*",
"^(cajero)\s.*", "^(cafeteria)\s.*", "^(cafe)\s.*", "^(burritos)\s.*",
"^(burguer\sking|burger\sking)\s.*", "^(bip)\s.*", "^(bimbo\sexpendio)\s.*",
"^(burguer|burger)\s.*", "^(ba.os)\s.*", "^(bae)\s.*", "^(bachilleres)\s.*", "^(bachillerato)\s.*",
"^(autosercivio|auto\sservicio)\s.*", "^(autolavado|auto\slavado)\s.*",
"^(autobuses\sla\spiedad|autobuses\sde\sla\piedad)\s.*", "^(arrachera)\s.*",
"^(alsuper\sstore)\s.*", "^(alsuper)\s.*", "^(academia)\s.*", "^(abts)\s.*",
"^(abarrotera\slagunitas)\s.*", "^(abarrotera)\s.*", "^(abarrotes\sy\svinos)\s.*",
"^(abarrotes\sy\sverduras)\s.*", "^(abarrotes\sy\ssemillas)\s.*",
"^(abarrotes\sy\spapeleria)\s.*", "^(abarrotes\sy\snovedades)\s.*", "^(abarrotes\sy\sfruteria)\s.*",
"^(abarrotes\sy\sdeposito)\s.*", "^(abarrotes\sy\scremeria)\s.*", "^(abarrotes\sy\scarniceria)\s.*",
"^(abarrotes\svinos\sy\slicores)\s.*", "^(abarrote|abarrotes|abarotes|abarr|aba|ab)\s.*",
"^(7\seleven)\s.*", "^(7\s24)\s.*"]
client_df["NombreCliente2"] = client_df["NombreCliente"]
for var in special_list:
client_df[var] = client_df["NombreCliente"].str.extract(var, expand=False).str.upper()
replace = client_df.loc[~client_df[var].isnull(), var]
client_df.loc[~client_df[var].isnull(),"NombreCliente2"] = replace
client_df.drop(var, axis=1, inplace=True)
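    # Illustrative example (not in the original script): a raw name such as
    # "oxxo gasolinera 123" matches the pattern "^(oxxo)\s.*", so its
    # NombreCliente2 becomes the unified label "OXXO".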
client_df.drop("NombreCliente", axis=1, inplace=True)
client_df.to_csv("../data/cliente_tabla2.csv.gz", compression="gzip", index=False)
def client_anaylsis2():
"""
    The idea here is to unify the client IDs of several different customers into broader categories,
    using a different approach from the one in client_anaylsis().
"""
client_df = pd.read_csv("../data/cliente_tabla.csv.zip", compression="zip")
# clean duplicate spaces in client names
client_df["NombreCliente"] = client_df["NombreCliente"].str.upper()
client_df["NombreCliente"] = client_df["NombreCliente"].apply(lambda x: " ".join(x.split()))
client_df = client_df.drop_duplicates(subset="Cliente_ID")
# --- Begin Filtering for specific terms
# Note that the order of filtering is significant.
# For example:
# The regex of .*ERIA.* will assign "FRUITERIA" to 'Eatery' rather than 'Fresh Market'.
# In other words, the first filters to occur have a bigger priority.
def filter_specific(vf2):
# Known Large Company / Special Group Types
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*REMISION.*', 'Consignment')
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*WAL MART.*', '.*SAMS CLUB.*'], 'Walmart', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*OXXO.*', 'Oxxo Store')
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*CONASUPO.*', 'Govt Store')
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*BIMBO.*', 'Bimbo Store')
# General term search for a random assortment of words I picked from looking at
# their frequency of appearance in the data and common spanish words for these categories
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*COLEG.*', '.*UNIV.*', '.*ESCU.*', '.*INSTI.*', \
'.*PREPAR.*'], 'School', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*PUESTO.*', 'Post')
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*FARMA.*', '.*HOSPITAL.*', '.*CLINI.*', '.*BOTICA.*'],
'Hospital/Pharmacy', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*CAFE.*', '.*CREMERIA.*', '.*DULCERIA.*', \
'.*REST.*', '.*BURGER.*', '.*TACO.*', '.*TORTA.*', \
                                                             '.*TAQUER.*', '.*HOT DOG.*', '.*PIZZA.*', \
'.*COMEDOR.*', '.*ERIA.*', '.*BURGU.*'], 'Eatery',
regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*SUPER.*', 'Supermarket')
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*COMERCIAL.*', '.*BODEGA.*', '.*DEPOSITO.*', \
'.*ABARROTES.*', '.*MERCADO.*', '.*CAMBIO.*', \
'.*MARKET.*', '.*MART .*', '.*MINI .*', \
'.*PLAZA.*', '.*MISC.*', '.*ELEVEN.*', '.*EXP.*', \
'.*SNACK.*', '.*PAPELERIA.*', '.*CARNICERIA.*', \
'.*LOCAL.*', '.*COMODIN.*', '.*PROVIDENCIA.*'
], 'General Market/Mart' \
, regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*VERDU.*', '.*FRUT.*'], 'Fresh Market', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*HOTEL.*', '.*MOTEL.*', ".*CASA.*"], 'Hotel', regex=True)
filter_specific(client_df)
# --- Begin filtering for more general terms
# The idea here is to look for names with particles of speech that would
# not appear in a person's name.
# i.e. "Individuals" should not contain any participles or numbers in their names.
def filter_participle(vf2):
vf2['NombreCliente'] = vf2['NombreCliente'].replace([
'.*LA .*', '.*EL .*', '.*DE .*', '.*LOS .*', '.*DEL .*', '.*Y .*', '.*SAN .*', '.*SANTA .*', \
'.*AG .*', '.*LAS .*', '.*MI .*', '.*MA .*', '.*II.*', '.*[0-9]+.*' \
], 'Small Franchise', regex=True)
filter_participle(client_df)
# Any remaining entries should be "Individual" Named Clients, there are some outliers.
# More specific filters could be used in order to reduce the percentage of outliers in this final set.
def filter_remaining(vf2):
def function_word(data):
# Avoid the single-words created so far by checking for upper-case
if (data.isupper()) and (data != "NO IDENTIFICADO"):
return 'Individual'
else:
return data
vf2['NombreCliente'] = vf2['NombreCliente'].map(function_word)
filter_remaining(client_df)
client_df.rename(columns={"NombreCliente": "client_name3"}, inplace=True)
client_df.to_csv("../data/cliente_tabla3.csv.gz", compression="gzip", index=False)
def preprocess(save=False):
start = time.time()
dtype_dict = {"Semana": np.uint8, 'Agencia_ID': np.uint16, 'Canal_ID': np.uint8,
'Ruta_SAK': np.uint16, 'Cliente_ID': np.uint32, 'Producto_ID': np.uint16,
'Demanda_uni_equil': np.uint32, "Venta_hoy": np.float32, "Venta_uni_hoy": np.uint32,
"Dev_uni_proxima": np.uint32, "Dev_proxima": np.float32}
train = pd.read_csv("../data/train.csv.zip", compression="zip", dtype=dtype_dict)
test = pd.read_csv("../data/test.csv.zip", compression="zip", dtype=dtype_dict)
# train = train.sample(100000)
# test = test.sample(100000)
# We calculate out-of-sample mean features from most of the training data and only train from the samples in week 9.
# Out-of-sample mean features for training are calculated from all weeks before week 9 and for the test set from
# all weeks including week 9
mean_dataframes = {}
mean_dataframes["train"] = train[train["Semana"]<9].copy()
mean_dataframes["test"] = train.copy()
print("complete train obs: {}".format(len(train)))
print("train week 9 obs: {}".format(len(train[train["Semana"] == 9])))
train = train[train["Semana"] == 9]
# not used in later stages. Was used to find the right hyperparameters for XGBoost. After finding them and to
# obtain the best solution the evaluation data was incorporated into the training data and the hyperparameters
# were used "blindly"
# eval = train.iloc[int(len(train) * 0.75):, :].copy()
# print("eval obs: {}".format(len(eval)))
# mean_dataframes["eval"] = mean_dataframes["test"].iloc[:eval.index.min(), :].copy()
# train = train.iloc[:int(len(train) * 0.75), :]
# print("train obs: {}".format(len(train)))
# read data files and create new client ids
town = pd.read_csv("../data/town_state.csv.zip", compression="zip")
product = pd.read_csv("../data/producto_tabla.csv.zip", compression="zip")
client = pd.read_csv("../data/cliente_tabla.csv.zip", compression="zip")
client2 = pd.read_csv("../data/cliente_tabla2.csv.gz")
client2.rename(columns={"NombreCliente2": "client_name2"}, inplace=True)
client3 = pd.read_csv("../data/cliente_tabla3.csv.gz")
print("Reading data took {:.1f}min".format((time.time()-start)/60))
new_start = time.time()
# Feature Extraction
prod_split = product.NombreProducto.str.split(r"(\s\d+\s?(kg|Kg|g|G|in|ml|pct|p|P|Reb))")
product["product"] = prod_split.apply(lambda x: x[0])
product["brand2"] = product.NombreProducto.str.extract("^.+\s(\D+) \d+$", expand=False)
product['brand'] = prod_split.apply(lambda x: x[-1]).str.split().apply(lambda x: x[:-1])
product['num_brands'] = product.brand.apply(lambda x: len(x))
product['brand'] = prod_split.apply(lambda x: x[-1]).str.split().apply(lambda x: x[:-1]).astype("str")
product['short_name'] = product['product'].str.split(r'[A-Z][A-Z]').apply(lambda x: x[0])
product["beverage"] = product.NombreProducto.str.extract("\d+(ml)", expand=False)
product.loc[product["beverage"].notnull(), "beverage"] = 1
product["beverage"] = pd.to_numeric(product["beverage"])
product["beverage"] = product["beverage"].fillna(0)
w = product.NombreProducto.str.extract("(\d+)(kg|Kg|g|G|ml)", expand=True)
product["weight"] = w[0].astype("float") * w[1].map({"kg": 1000, "Kg": 1000, "G": 1, "g": 1, "ml": 1})
product["pieces"] = product.NombreProducto.str.extract("(\d+)p\s", expand=False).astype("float")
product["weight_per_piece"] = product["weight"].fillna(0) / product["pieces"].fillna(1)
product.loc[product["short_name"] == "", "short_name"] = product.loc[product["short_name"] == "", "product"]
product.drop(["NombreProducto", "product"], axis=1, inplace=True)
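    # Illustrative example (hypothetical product name): "Pan Blanco 640g BIM 1240"
    # would yield short_name/product "Pan Blanco", weight 640, pieces NaN and
    # brand "BIM" under the regexes above.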
# Drop duplicate clients
client = client.drop_duplicates(subset="Cliente_ID")
# clean duplicate spaces in client names
client["NombreCliente"] = client["NombreCliente"].apply(lambda x: " ".join(x.split()))
# Join everything
dataset_list = ["train", "test"]
for dataset in dataset_list:
mean_dataframes[dataset] = mean_dataframes[dataset].merge(town, how="left", on="Agencia_ID")
mean_dataframes[dataset] = mean_dataframes[dataset].merge(product, how="left", on="Producto_ID")
mean_dataframes[dataset] = mean_dataframes[dataset].merge(client, how="left", on="Cliente_ID")
mean_dataframes[dataset] = mean_dataframes[dataset].merge(client2, how="left", on="Cliente_ID")
mean_dataframes[dataset] = mean_dataframes[dataset].merge(client3, how="left", on="Cliente_ID")
train = train.merge(town, how="left", on="Agencia_ID")
train = train.merge(product, how="left", on="Producto_ID")
train = train.merge(client, how="left", on="Cliente_ID")
train = train.merge(client2, how="left", on="Cliente_ID")
train = train.merge(client3, how="left", on="Cliente_ID")
test = test.merge(town, how="left", on="Agencia_ID")
test = test.merge(product, how="left", on="Producto_ID")
test = test.merge(client, how="left", on="Cliente_ID")
test = test.merge(client2, how="left", on="Cliente_ID")
test = test.merge(client3, how="left", on="Cliente_ID")
rename_dict = {"Semana": "week", "Agencia_ID": "sales_depot_id", "Canal_ID": "sales_channel_id",
"Ruta_SAK": "route_id", "Town": "town", "State": "state",
"Cliente_ID": "client_id", "NombreCliente": "client_name", "Producto_ID": "product_id",
"NombreProducto": "product_name", "Demanda_uni_equil": "target",
"Venta_uni_hoy": "sales_unit_this_week", "Venta_hoy": "sales_this_week",
"Dev_uni_proxima": "returns_unit_next_week", "Dev_proxima": "returns_next_week"}
# rename columns for convenience
for dataset in dataset_list:
mean_dataframes[dataset].rename(columns=rename_dict, inplace=True)
train.rename(columns=rename_dict, inplace=True)
test.rename(columns=rename_dict, inplace=True)
# transform target demand to log scale
for dataset in dataset_list:
mean_dataframes[dataset]["log_demand"] = np.log1p(mean_dataframes[dataset]["target"])
train["log_demand"] = np.log1p(train["target"])
train_target = train["log_demand"]
train.drop(["target", "log_demand", "sales_unit_this_week", "sales_this_week", "returns_unit_next_week",
"returns_next_week"], axis=1, inplace=True)
print("Feature Extraction and merging took {:.1f}min".format((time.time()-new_start)/60))
new_start = time.time()
def get_mean(mean_dataset, dataset, columns, target):
tempTable = mean_dataset[columns + [target]].groupby(columns).agg(
["mean", "std", "median"])[target]
name = "_".join(columns)
tempTable = tempTable.rename(columns={
"count": target + "_count_" + name,
"mean": target + "_mean_" + name,
"std": target + "_std_" + name,
"sum": target + "_sum_" + name,
"median": target + "_median_" + name})
tempTable.reset_index(inplace=True)
dataset = pd.merge(dataset, tempTable, how='left', on=columns)
return dataset
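    # Illustrative example: get_mean(mean_df, train, ["product_id"], "log_demand")
    # returns `train` with log_demand_mean_product_id, log_demand_std_product_id and
    # log_demand_median_product_id columns left-joined on product_id.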
# calculate means of variables that are only available in the training set
train = get_mean(mean_dataframes["train"], train,
["product_id", "client_id", "sales_depot_id", "route_id", "short_name"], "sales_unit_this_week")
test = get_mean(mean_dataframes["test"], test,
["product_id", "client_id", "sales_depot_id", "route_id", "short_name"], "sales_unit_this_week")
column_combinations = [["product_id", "client_id", "sales_depot_id", "route_id"], ["product_id", "route_id"],
["short_name", "client_id", "sales_depot_id"], ["product_id"], ["short_name", "client_id"],
["product_id", "client_id"]]
only_in_train = ["sales_unit_this_week", "sales_this_week", "returns_unit_next_week", "returns_next_week"]
for columns in column_combinations:
for var in only_in_train:
train = get_mean(mean_dataframes["train"], train, columns, var)
test = get_mean(mean_dataframes["test"], test, columns, var)
column_combinations = [["short_name", "client_id", "sales_channel_id"],
["short_name", "town"], ["route_id", "client_name3", "sales_channel_id", "town", "short_name"],
["product_id", "client_name3", "town", "route_id", "short_name"],
["product_id", "client_name3", "town", "short_name", "route_id"],
["short_name", "client_id", "sales_depot_id", "sales_channel_id", "route_id"],
["product_id", "client_id", "sales_depot_id", "sales_channel_id", "route_id"],
["short_name", "client_id", "town"], ["client_name3", "short_name"],
["client_name3", "short_name", "sales_depot_id"],
["product_id", "client_name3", "sales_depot_id", "short_name", "route_id"],
["client_name3", "short_name", "product_id"],
["client_name3", "short_name", "route_id"],
["client_name2", "short_name", "product_id"], ["client_name2", "short_name", "route_id"],
["product_id", "client_id", "route_id", "short_name", "sales_depot_id"],
["product_id", "client_id", "route_id", "short_name"],
["product_id", "client_id", "sales_depot_id", "short_name"],
["route_id", "product_id", "short_name"], ["route_id", "client_id", "short_name"],
["product_id", "client_id", "short_name"], ["product_id", "short_name"],
["short_name", "sales_depot_id"], ["short_name", "client_id", "sales_depot_id"],
["route_id", "client_id"], ["route_id", "short_name"], ["client_name2", "short_name"],
["product_id", "route_id"], ["product_id", "client_id", "sales_depot_id"],
["product_id", "client_id"], ["product_id", "client_id", "sales_depot_id", "route_id"],
["product_id", "client_id", "route_id"], ["product_id", "client_name"],
["short_name", "client_id"]]
# calculate out-of-sample means of the target feature over combinations of categorical features (product_id,
# client_id, etc.)
for columns in column_combinations:
train = get_mean(mean_dataframes["train"], train, columns, "log_demand")
test = get_mean(mean_dataframes["test"], test, columns, "log_demand")
train['null_count'] = train.isnull().sum(axis=1).tolist()
test['null_count'] = test.isnull().sum(axis=1).tolist()
for feat in ["sales_depot_id", "sales_channel_id", "route_id", "town", "state", "client_id", "client_name",
"client_name2", "client_name3", "product_id", "brand", "brand2", "short_name"]:
for dataset in dataset_list:
mean_train = mean_dataframes[dataset]
# LOG DEMAND MEANS
tempTable = mean_train[[feat, "log_demand"]].groupby(feat).agg(["count", "mean", "std", "sum",
"median"]).log_demand
tempTable = tempTable.rename(
columns={"count": "count_"+feat, "mean": "mean_"+feat, "std": "sd_"+feat, "sum": "sum_"+feat,
"median": "median_" + feat})
tempTable.reset_index(inplace=True)
if dataset == "train":
train = pd.merge(train, tempTable, how='left', on=feat)
else:
test = | pd.merge(test, tempTable, how='left', on=feat) | pandas.merge |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# File called _pytest for PyCharm compatibility
import ast
import time
from io import StringIO
import pandas as pd
from pandas.testing import assert_frame_equal
import eland as ed
from tests import ES_TEST_CLIENT, FLIGHTS_INDEX_NAME
from tests.common import ROOT_DIR, TestData
class TestDataFrameToCSV(TestData):
def test_to_csv_head(self):
results_file = ROOT_DIR + "/dataframe/results/test_to_csv_head.csv"
ed_flights = self.ed_flights().head()
pd_flights = self.pd_flights().head()
ed_flights.to_csv(results_file)
# Converting back from csv is messy as pd_flights is created from a json file
pd_from_csv = pd.read_csv(
results_file,
index_col=0,
converters={
"DestLocation": lambda x: ast.literal_eval(x),
"OriginLocation": lambda x: ast.literal_eval(x),
},
)
pd_from_csv.index = pd_from_csv.index.map(str)
pd_from_csv.timestamp = pd.to_datetime(pd_from_csv.timestamp)
assert_frame_equal(pd_flights, pd_from_csv)
def test_to_csv_full(self):
results_file = ROOT_DIR + "/dataframe/results/test_to_csv_full.csv"
# Test is slow as it's for the full dataset, but it is useful as it goes over 10000 docs
ed_flights = self.ed_flights()
pd_flights = self.pd_flights()
ed_flights.to_csv(results_file)
# Converting back from csv is messy as pd_flights is created from a json file
pd_from_csv = pd.read_csv(
results_file,
index_col=0,
converters={
"DestLocation": lambda x: ast.literal_eval(x),
"OriginLocation": lambda x: ast.literal_eval(x),
},
)
pd_from_csv.index = pd_from_csv.index.map(str)
pd_from_csv.timestamp = pd.to_datetime(pd_from_csv.timestamp)
assert_frame_equal(pd_flights, pd_from_csv)
# Now read the csv to an index
now_millis = int(round(time.time() * 1000))
test_index = FLIGHTS_INDEX_NAME + "." + str(now_millis)
ed_flights_from_csv = ed.csv_to_eland(
results_file,
ES_TEST_CLIENT,
test_index,
index_col=0,
es_refresh=True,
es_type_overrides={
"OriginLocation": "geo_point",
"DestLocation": "geo_point",
},
converters={
"DestLocation": lambda x: ast.literal_eval(x),
"OriginLocation": lambda x: ast.literal_eval(x),
},
)
pd_flights_from_csv = ed.eland_to_pandas(ed_flights_from_csv)
# TODO - there is a 'bug' where the Elasticsearch index returns data in a different order to the CSV
print(ed_flights_from_csv.head())
print(pd_flights_from_csv.head())
# clean up index
ES_TEST_CLIENT.indices.delete(index=test_index)
def test_pd_to_csv_without_filepath(self):
ed_flights = self.ed_flights()
pd_flights = self.pd_flights()
ret = ed_flights.to_csv()
results = StringIO(ret)
# Converting back from csv is messy as pd_flights is created from a json file
pd_from_csv = pd.read_csv(
results,
index_col=0,
converters={
"DestLocation": lambda x: ast.literal_eval(x),
"OriginLocation": lambda x: ast.literal_eval(x),
},
)
pd_from_csv.index = pd_from_csv.index.map(str)
pd_from_csv.timestamp = | pd.to_datetime(pd_from_csv.timestamp) | pandas.to_datetime |
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Tuple
import numpy as np
import pandapower as pp
import pandas as pd
from pandapower.control import ConstControl
from pandapower.timeseries import DFData, OutputWriter, run_timeseries
from tqdm import tqdm
from conf.conf import SIM_DIR
from src.simulation.network import NetData
"""
Implementation of the PandaPower simulation tool, to generate voltages and currents from network and loads.
Copyright @donelef, @jbrouill on GitHub
"""
@dataclass
class SimulationResult(object):
"""
Data class to store voltages, loads and loading percentage of the grid.
This is generated by the PandaPower simulation and can be read/written to files.
"""
vm_pu: pd.DataFrame
va_degree: pd.DataFrame
p_mw: pd.DataFrame
q_mvar: pd.DataFrame
loading_percent: pd.DataFrame
result_path: Path
@staticmethod
def from_dir(dir_path: Path):
return SimulationResult(
result_path=dir_path,
**{f.stem: | pd.read_csv(f, sep=";", index_col=0) | pandas.read_csv |
"""
Ohsome API client for Python
"""
from json import JSONDecodeError
import geopandas as gpd
import pandas as pd
import json
from ohsome.utils import find_groupby_names
from ohsome.exceptions import OhsomeException
class OhsomeResponse:
"""
Parses the response of an Ohsome request
"""
def __init__(self, response, url=None, params=None):
self.url = url
self.params = params
if isinstance(response, dict):
self.data = response
else:
try:
self.data = response.json()
except JSONDecodeError as e:
raise OhsomeException(message="Invalid JSON response: " + e.msg, error='JSONDecodeError', url=self.url, params=self.params, response=response)
self.ok = response.ok
self.status_code = response.status_code
def as_dataframe(self):
"""
Converts the result to a Pandas DataFrame
:return: pandas dataframe
"""
assert not "features" in self.data.keys(), "GeoJSON object cannot be converted to Pandas Dataframe. Use as_geodataframe instead."
if "result" in self.data.keys():
result_df = pd.DataFrame().from_records(self.data["result"])
if "timestamp" in result_df.columns:
result_df["timestamp"] = | pd.to_datetime(result_df["timestamp"], format="%Y-%m-%dT%H:%M:%SZ") | pandas.to_datetime |
import pandas as pd
s1 = pd.Series([10, 20, 30], name="Total")
s2 = pd.Series(["Jonathan", "Maikao", "Ronald"], name="Clientes")
df = | pd.DataFrame({s2.name: s2, s1.name: s1}) | pandas.DataFrame |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def mysql_url() -> str:
conn = os.environ["MYSQL_URL"]
return conn
def test_mysql_without_partition(mysql_url: str) -> None:
query = "select * from test_table limit 3"
df = read_sql(mysql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([1, 2, 3], dtype="Int64"),
"test_float": pd.Series([1.1, 2.2, 3.3], dtype="float64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_mysql_with_partition(mysql_url: str) -> None:
query = "select * from test_table"
df = read_sql(
mysql_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 6], dtype="Int64"),
"test_float": pd.Series([1.1, 2.2, 3.3, 4.4, 5.5, 6.6], dtype="float64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_mysql_types(mysql_url: str) -> None:
query = "select * from test_types"
df = read_sql(mysql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_date": pd.Series(["1999-07-25", "2020-12-31", "2021-01-28"], dtype="datetime64[ns]"),
"test_time": pd.Series(["00:00:00", "23:59:59", "12:30:30"], dtype="object"),
"test_datetime": pd.Series(["1999-07-25 00:00:00", "2020-12-31 23:59:59", None], dtype="datetime64[ns]"),
"test_new_decimal": pd.Series([1.1, None, 3.3], dtype="float"),
"test_decimal": pd.Series([1, 2, 3], dtype="float"),
"test_varchar": pd.Series([None, "varchar2", "varchar3"], dtype="object"),
"test_char": pd.Series(["char1", "char2", "char3"], dtype="object")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_mysql_types_text(mysql_url: str) -> None:
query = "select * from test_types"
df = read_sql(mysql_url, query, protocol="text")
expected = pd.DataFrame(
index=range(3),
data={
"test_date": pd.Series(["1999-07-25", "2020-12-31", "2021-01-28"], dtype="datetime64[ns]"),
"test_time": pd.Series(["00:00:00", "23:59:59", "12:30:30"], dtype="object"),
"test_datetime": pd.Series(["1999-07-25 00:00:00", "2020-12-31 23:59:59", None], dtype="datetime64[ns]"),
"test_new_decimal": pd.Series([1.1, None, 3.3], dtype="float"),
"test_decimal": pd.Series([1, 2, 3], dtype="float"),
"test_varchar": pd.Series([None, "varchar2", "varchar3"], dtype="object"),
"test_char": pd.Series(["char1", "char2", "char3"], dtype="object")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_mysql_more_types(mysql_url: str) -> None:
query = "select * from test_more_types"
df = read_sql(mysql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_tiny": pd.Series([0, 1, 0], dtype="Int64"),
"test_short": pd.Series([-28, 128, 725], dtype="Int64"),
"test_float": pd.Series([1.1, 2.2, 3.3], dtype="float"),
"test_year": pd.Series([1901, 2021, 2155], dtype="Int64"),
"test_timestamp": pd.Series(["1990-01-01 00:00:01", "2021-08-05 12:12:12", "2038-01-19 03:14:07"], dtype="datetime64[ns]"),
"test_blob": | pd.Series([b"blobblobblobblob1", b"blobblobblobblob2", b"blobblobblobblob3"], dtype="object") | pandas.Series |
# data_functions.py
#!/usr/bin/env python
# coding: utf-8
# Import libraries
import logging
import pandas as pd
import numpy as np
from pathlib import Path
import argparse
import gc
from scipy import stats
# REDUCE MEMORY USAGE
def reduce_mem_usage(df, verbose=False):
start_mem = df.memory_usage().sum() / 1024 ** 2
int_columns = df.select_dtypes(include=["int"]).columns
float_columns = df.select_dtypes(include=["float"]).columns
for col in int_columns:
df[col] = pd.to_numeric(df[col], downcast="integer")
for col in float_columns:
df[col] = pd.to_numeric(df[col], downcast="float")
end_mem = df.memory_usage().sum() / 1024 ** 2
if verbose:
print(
"Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)".format(
end_mem, 100 * (start_mem - end_mem) / start_mem
)
)
return df
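# Minimal usage sketch (illustrative):
#   df = pd.DataFrame({"a": np.arange(100_000), "b": np.random.rand(100_000)})
#   df = reduce_mem_usage(df, verbose=True)
#   # integers are downcast to the smallest sufficient integer subtype,
#   # floats to float32 where possible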
# LOAD DATASET
def load_data(file_path, kind='csv'):
data = pd.DataFrame([])
if kind=='csv':
data = pd.read_csv(f"{file_path}.csv", sep=config.CSV_SEP).pipe(reduce_mem_usage)
elif kind=='pickle':
data = pd.read_pickle(f"{file_path}.pkl").pipe(reduce_mem_usage)
elif kind=='parquet':
data = | pd.read_parquet(f"{file_path}.parquet") | pandas.read_parquet |
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
# Command line :
# python -m benchmark.GG.GB_learning_curve
import os
import logging
from config import SAVING_DIR
from config import DEFAULT_DIR
from config import SEED
import pandas as pd
from visual.misc import set_plot_config
set_plot_config()
from utils.log import set_logger
from utils.log import flush
from utils.log import print_line
from utils.model import get_model
from utils.evaluation import evaluate_classifier
from utils.evaluation import evaluate_summary_computer
from utils.images import gather_images
from problem.gamma_gauss import GGConfig as Config
from problem.gamma_gauss import Generator
from model.gradient_boost import GradientBoostingModel
from ..my_argparser import GB_parse_args
DATA_NAME = 'GG'
BENCHMARK_NAME = DATA_NAME
N_ITER = 30
N_TRAIN_RANGE = [100, 500, 1000, 2500, 5000, 8000, 10000, 12000, 15000, 17000, 20000]
def build_model(args, i_cv):
model = get_model(args, GradientBoostingModel)
model.set_info(DATA_NAME, f"{BENCHMARK_NAME}/learning_curve", i_cv)
return model
def plot_auc(evaluation, model_name="GB", directory=DEFAULT_DIR):
import matplotlib.pyplot as plt
title = f"{model_name} AUC"
x = []
y = []
y_err = []
for n_train_samples, table in evaluation.groupby('n_train_samples'):
x.append(n_train_samples)
y.append(table['valid_auc'].mean())
y_err.append(table['valid_auc'].std())
plt.errorbar(x, y, yerr=y_err, fmt='o', capsize=15, capthick=2, label='AUC')
fname = "auc.png"
    plt.xlabel('# train samples')
    plt.ylabel('auc $\\pm$ std')
plt.title(title)
plt.legend()
plt.savefig(os.path.join(directory, fname))
plt.clf()
def plot_accuracy(evaluation, model_name="GB", directory=DEFAULT_DIR):
import matplotlib.pyplot as plt
title = f"{model_name} AUC"
x = []
y = []
y_err = []
for n_train_samples, table in evaluation.groupby('n_train_samples'):
x.append(n_train_samples)
y.append(table['valid_accuracy'].mean())
y_err.append(table['valid_accuracy'].std())
plt.errorbar(x, y, yerr=y_err, fmt='o', capsize=15, capthick=2, label='accuracy')
fname = "accuracy.png"
    plt.xlabel('# train samples')
    plt.ylabel('accuracy $\\pm$ std')
plt.title(title)
plt.legend()
plt.savefig(os.path.join(directory, fname))
plt.clf()
# =====================================================================
# MAIN
# =====================================================================
def main():
# BASIC SETUP
logger = set_logger()
    args = GB_parse_args(main_description="Training launcher for Gradient boosting on GG benchmark")
logger.info(args)
flush(logger)
# INFO
model = build_model(args, -1)
os.makedirs(model.results_directory, exist_ok=True)
# config = Config()
# config_table = evaluate_config(config)
# config_table.to_csv(os.path.join(model.results_directory, 'config_table.csv'))
# RUN
evaluation = [run(args, i_cv) for i_cv in range(N_ITER)]
# EVALUATION
evaluation = | pd.concat(evaluation) | pandas.concat |
import os
import sys
import json
import zipfile
import csv
import shutil
import io
from collections import namedtuple
from util import tosec, fsec
import pandas as pd
import matplotlib.pyplot as plt
Trip = namedtuple("Trip", ["service", "route", "direction"])
def main():
if len(sys.argv) != 3:
print("Usage: python3 gtfs_diff.py <zip1> <zip2>")
sys.exit(1)
dirsuffix = "-diff-temp-dir"
zpaths = sys.argv[1:]
tmpdirs = [zpath + dirsuffix for zpath in zpaths]
# step 1: break trips into separate files by (service,route,direction)
split_routes(zpaths, tmpdirs)
# step 2: do a diff on each trip group
diff_routes(zpaths, tmpdirs)
def split_routes(zpaths, tmpdirs):
for zpath, tmpdir in zip(zpaths, tmpdirs):
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
os.mkdir(tmpdir)
with zipfile.ZipFile(zpath) as zf:
with zf.open("trips.txt") as f:
trips = pd.read_csv(f)
assert len(set(trips["trip_id"])) == len(trips)
trips = {row.trip_id: Trip(str(row.service_id),
str(row.route_short_name),
str(row.direction_id))
for row in trips.itertuples()}
curr_path = None
curr_f = None
curr_writer = None
with zf.open("stop_times.txt") as f:
r = csv.reader(io.TextIOWrapper(f))
header = next(r)
trip_id_idx = header.index("trip_id")
for row in r:
trip_id = int(row[trip_id_idx])
trip = trips[trip_id]
this_path = os.path.join(tmpdir, "__".join(trip) + ".txt")
if this_path != curr_path:
if curr_f:
curr_f.close()
curr_path = this_path
is_new = not os.path.exists(curr_path)
curr_f = open(curr_path, "w" if is_new else "a")
curr_writer = csv.writer(curr_f)
if is_new:
curr_writer.writerow(header)
curr_writer.writerow(row)
curr_f.close()
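# Example of the per-trip-group file naming (illustrative): a trip with
# service_id "WKD", route "80" and direction "0" is appended to
# "<zip>-diff-temp-dir/WKD__80__0.txt".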
def diff_routes(zpaths, tmpdirs):
html = ["<html><body><h1>Changes</h1>\n"]
names = []
for d in tmpdirs:
names.extend(os.listdir(d))
names = sorted(set(names))
for name in names:
cumsum_trips = []
for tdir in tmpdirs:
path = os.path.join(tdir, name)
if not os.path.exists(path):
cumsum_trips.append(pd.Series(dtype=int))
continue
df = | pd.read_csv(path) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
This generates and calls the api
It's worth writing this so that a database is updated - maybe csv based for
each commodity code
@author: <NAME>
@date 08/08/2014
"""
import requests
import pandas as pd
from os.path import join
import os
import time
import unicodedata
import numpy
import datetime
import sys
from os.path import isfile
import logging
class ComtradeApi:
_source_folder = ""
_url = 'http://comtrade.un.org/api/get?'
_ctry_codes = []
_ctry_alt_names = []
_max_partners = 5
_max_years = 5
_last_call_time = 0
_working_df = []
calls_in_hour = 0
first_call = datetime.datetime.now()
max_calls = 95
"""
Pure api call, no updating of a database
"""
def __init__(self, ctry_codes_path="UN Comtrade Country List.csv", fld=""):
self._source_folder = fld
# load the country codes
self._ctry_codes = pd.read_csv(
join(fld, ctry_codes_path), keep_default_na=False, encoding="ISO-8859-1")
# Substitute "End Valid Year" values "Now" with current year
import datetime
year = datetime.datetime.now().year
self._ctry_codes.loc[self._ctry_codes["End Valid Year"] == "Now", "End Valid Year"] = year
self._ctry_codes['End Valid Year'] = self._ctry_codes['End Valid Year'].astype("int32")
self._ctry_codes = self._ctry_codes.ix[self._ctry_codes['End Valid Year'] > 2012]
# check to make sure there are correct fields
if ('ISO2-digit Alpha' not in self._ctry_codes.columns) | \
('ISO3-digit Alpha' not in self._ctry_codes.columns):
logging.warning("----------------------------")
logging.warning(" ")
logging.warning("Check format of %s file" % ctry_codes_path)
logging.warning("It appears to be missing fields")
logging.warning(" ")
logging.warning("--------------------------")
return
# Remove NES and other areas
# check the field name for the country code
if "Country Code" in self._ctry_codes.columns:
self._ctry_codes['ctyCode'] = self._ctry_codes['Country Code']
# World
self._ctry_codes = self._ctry_codes.ix[self._ctry_codes['ctyCode'] != 0]
# EU-27
self._ctry_codes = self._ctry_codes.ix[self._ctry_codes['ctyCode'] != 97]
# LAIA NES
self._ctry_codes = self._ctry_codes.ix[self._ctry_codes['ctyCode'] != 473]
# Oceania NES
self._ctry_codes = self._ctry_codes.ix[self._ctry_codes['ctyCode'] != 527]
# Europe NES
self._ctry_codes = self._ctry_codes.ix[self._ctry_codes['ctyCode'] != 568]
# Other Africa NES
self._ctry_codes = self._ctry_codes.ix[self._ctry_codes['ctyCode'] != 577]
# Bunkers
self._ctry_codes = self._ctry_codes.ix[self._ctry_codes['ctyCode'] != 837]
# Free Zones
self._ctry_codes = self._ctry_codes.ix[self._ctry_codes['ctyCode'] != 838]
        # Special Categories
self._ctry_codes = self._ctry_codes.ix[self._ctry_codes['ctyCode'] != 839]
# Areas NES
self._ctry_codes = self._ctry_codes.ix[self._ctry_codes['ctyCode'] != 899]
# Neutral Zone
self._ctry_codes = self._ctry_codes.ix[self._ctry_codes['ctyCode'] != 536]
# North America and Central America, nes
self._ctry_codes = self._ctry_codes.ix[self._ctry_codes['ctyCode'] != 637]
# Finally remove where the iso codes are not available
self._ctry_codes = self._ctry_codes.ix[pd.isnull(self._ctry_codes['ISO2-digit Alpha']) == False]
self._ctry_codes = self._ctry_codes.ix[ | pd.isnull(self._ctry_codes['ISO3-digit Alpha']) | pandas.isnull |
import pandas
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
f1 = open('Data3-1.txt', 'r')
w1 = f1.readlines()
for line in w1:
w1 = line.split(',')
w1 = [float(x) for x in w1]
f1.close()
f2 = open('Data3-2.txt', 'r')
w2 = f2.readlines()
for line in w2:
w2 = line.split(',')
w2 = [float(x) for x in w2]
f2.close()
f3 = open('Data3-3.txt', 'r')
w3 = f3.readlines()
for line in w3:
w3 = line.split(',')
w3 = [float(x) for x in w3]
f3.close()
data = {
'Data1': w1,
'Data2': w2,
'Data3': w3,
}
from itertools import combinations
from scipy.stats import ttest_ind  # built-in two-sample t-test (two-sided p-value by default)
print('1st vs 2nd P-value')
for list1, list2 in combinations(data.keys(), 2):
t, p = ttest_ind(data[list1], data[list2])
print(list1, list2, p)
df = | pandas.DataFrame(data) | pandas.DataFrame |
# Copyright 2021 AstroLab Software
# Author: <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import DoubleType, StringType
import pandas as pd
import numpy as np
import os
from fink_science.conversion import mag2fluxcal_snana
from fink_science.utilities import load_scikit_model, load_pcs
from fink_science.kilonova.lib_kn import extract_all_filters_fink
from fink_science.kilonova.lib_kn import get_features_name
from fink_science import __file__
from fink_science.tester import spark_unit_tests
@pandas_udf(DoubleType(), PandasUDFType.SCALAR)
def knscore(jd, fid, magpsf, sigmapsf, model_path=None, pcs_path=None, npcs=None) -> pd.Series:
""" Return the probability of an alert to be a Kilonova using a Random
Forest Classifier.
Parameters
----------
jd: Spark DataFrame Column
JD times (float)
fid: Spark DataFrame Column
Filter IDs (int)
magpsf, sigmapsf: Spark DataFrame Columns
Magnitude from PSF-fit photometry, and 1-sigma error
model_path: Spark DataFrame Column, optional
Path to the trained model. Default is None, in which case the default
model `data/models/KN_model_2PC.pkl` is loaded.
pcs_path: Spark DataFrame Column, optional
Path to the Principal Component file. Default is None, in which case
the `data/models/components.csv` is loaded.
npcs: Spark DataFrame Column, optional
Integer representing the number of Principal Component to use. It
should be consistent to the training model used. Default is None (i.e.
default npcs for the default `model_path`, that is 1).
Returns
----------
probabilities: 1D np.array of float
Probability between 0 (non-KNe) and 1 (KNe).
Examples
----------
>>> from fink_science.utilities import concat_col
>>> from pyspark.sql import functions as F
>>> df = spark.read.load(ztf_alert_sample)
# Required alert columns
>>> what = ['jd', 'fid', 'magpsf', 'sigmapsf']
# Use for creating temp name
>>> prefix = 'c'
>>> what_prefix = [prefix + i for i in what]
# Append temp columns with historical + current measurements
>>> for colname in what:
... df = concat_col(df, colname, prefix=prefix)
# Perform the fit + classification (default model)
>>> args = [F.col(i) for i in what_prefix]
>>> df = df.withColumn('pKNe', knscore(*args))
# Note that we can also specify a model
>>> extra_args = [F.lit(model_path), F.lit(comp_path), F.lit(2)]
>>> args = [F.col(i) for i in what_prefix] + extra_args
>>> df = df.withColumn('pKNe', knscore(*args))
# Drop temp columns
>>> df = df.drop(*what_prefix)
>>> df.agg({"pKNe": "min"}).collect()[0][0]
0.0
>>> df.agg({"pKNe": "max"}).collect()[0][0] < 1.0
True
"""
epoch_lim = [-50, 50]
time_bin = 0.25
flux_lim = 0
# Flag empty alerts
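    # (np.array(x) == np.array(x)) is False for NaN entries, so the sum counts
    # the number of valid measurements in each alert's history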
mask = magpsf.apply(lambda x: np.sum(np.array(x) == np.array(x))) > 1
if len(jd[mask]) == 0:
return pd.Series(np.zeros(len(jd), dtype=float))
# add an exploded column with SNID
df_tmp = pd.DataFrame.from_dict(
{
'jd': jd[mask],
'SNID': range(len(jd[mask]))
}
)
df_tmp = df_tmp.explode('jd')
# compute flux and flux error
data = [mag2fluxcal_snana(*args) for args in zip(
magpsf[mask].explode(),
sigmapsf[mask].explode())]
flux, error = np.transpose(data)
# make a Pandas DataFrame with exploded series
pdf = pd.DataFrame.from_dict({
'SNID': df_tmp['SNID'],
'MJD': df_tmp['jd'],
'FLUXCAL': flux,
'FLUXCALERR': error,
'FLT': fid[mask].explode().replace({1: 'g', 2: 'r'})
})
# Load pre-trained model `clf`
if model_path is not None:
model = load_scikit_model(model_path.values[0])
else:
curdir = os.path.dirname(os.path.abspath(__file__))
model_path = curdir + '/data/models/KN_model_2PC.pkl'
model = load_scikit_model(model_path)
# Load pcs
if npcs is not None:
npcs = int(npcs.values[0])
else:
npcs = 2
if pcs_path is not None:
pcs_path_ = pcs_path.values[0]
else:
curdir = os.path.dirname(os.path.abspath(__file__))
pcs_path_ = curdir + '/data/models/components.csv'
pcs = load_pcs(pcs_path_, npcs=npcs)
test_features = []
filters = ['g', 'r']
# extract features (all filters) for each ID
for id in np.unique(pdf['SNID']):
pdf_sub = pdf[pdf['SNID'] == id]
pdf_sub = pdf_sub[pdf_sub['FLUXCAL'] == pdf_sub['FLUXCAL']]
features = extract_all_filters_fink(
epoch_lim=epoch_lim, pcs=pcs,
time_bin=time_bin, filters=filters,
lc=pdf_sub, flux_lim=flux_lim)
test_features.append(features)
# Remove pathological values
names_root = [
'npoints_',
'residuo_'
] + [
'coeff' + str(i + 1) + '_' for i in range(len(pcs.keys()))
] + ['maxflux_']
columns = [i + j for j in ['g', 'r'] for i in names_root]
matrix = pd.DataFrame(test_features, columns=columns)
zeros = np.logical_or(
matrix['coeff1_g'].values == 0,
matrix['coeff1_r'].values == 0
)
matrix_clean = matrix[~zeros]
# If all alerts are flagged as bad
if np.shape(matrix_clean) == (0, len(get_features_name(npcs))):
to_return = np.zeros(len(jd), dtype=float)
return pd.Series(to_return)
# Otherwise make predictions
probabilities = model.predict_proba(matrix_clean.values)
probabilities_notkne = np.zeros(len(test_features))
probabilities_kne = np.zeros(len(test_features))
probabilities_notkne[~zeros] = probabilities.T[0]
probabilities_kne[~zeros] = probabilities.T[1]
probabilities_ = np.array([probabilities_notkne, probabilities_kne]).T
    # Take only the probability of being a kilonova (KNe)
to_return = np.zeros(len(jd), dtype=float)
to_return[mask] = probabilities_.T[1]
return pd.Series(to_return)
@pandas_udf(StringType(), PandasUDFType.SCALAR)
def extract_features_knscore(jd, fid, magpsf, sigmapsf, pcs_path=None, npcs=None) -> pd.Series:
""" Extract features used by the Kilonova classifier (using a Random
Forest Classifier).
Parameters
----------
jd: Spark DataFrame Column
JD times (float)
fid: Spark DataFrame Column
Filter IDs (int)
magpsf, sigmapsf: Spark DataFrame Columns
Magnitude from PSF-fit photometry, and 1-sigma error
pcs_path: Spark DataFrame Column, optional
Path to the Principal Component file. Default is None, in which case
the `data/models/components.csv` is loaded.
npcs: Spark DataFrame Column, optional
Integer representing the number of Principal Component to use. It
should be consistent to the training model used. Default is None (i.e.
default npcs for the default `model_path`, that is 1).
Returns
----------
out: str
comma separated features
Examples
----------
>>> from pyspark.sql.functions import split
>>> from pyspark.sql.types import FloatType
>>> from fink_science.utilities import concat_col
>>> from fink_science.kilonova.lib_kn import get_features_name
>>> from pyspark.sql import functions as F
>>> df = spark.read.load(ztf_alert_sample)
# Required alert columns
>>> what = ['jd', 'fid', 'magpsf', 'sigmapsf']
# Use for creating temp name
>>> prefix = 'c'
>>> what_prefix = [prefix + i for i in what]
# Append temp columns with historical + current measurements
>>> for colname in what:
... df = concat_col(df, colname, prefix=prefix)
# Perform the fit + classification (default model)
>>> args = [F.col(i) for i in what_prefix]
>>> df = df.withColumn('features', extract_features_knscore(*args))
>>> KN_FEATURE_NAMES_2PC = get_features_name(2)
>>> for name in KN_FEATURE_NAMES_2PC:
... index = KN_FEATURE_NAMES_2PC.index(name)
... df = df.withColumn(name, split(df['features'], ',')[index].astype(FloatType()))
# Trigger something
>>> df.agg({KN_FEATURE_NAMES_2PC[0]: "min"}).collect()[0][0]
0.0
"""
epoch_lim = [-50, 50]
time_bin = 0.25
flux_lim = 0
    # Flag empty alerts: NaN != NaN, so this counts valid (non-NaN) measurements
    # and keeps only alerts with more than one of them
    mask = magpsf.apply(lambda x: np.sum(np.array(x) == np.array(x))) > 1
if len(jd[mask]) == 0:
return pd.Series(np.zeros(len(jd), dtype=float))
# add an exploded column with SNID
df_tmp = pd.DataFrame.from_dict(
{
'jd': jd[mask],
'SNID': range(len(jd[mask]))
}
)
df_tmp = df_tmp.explode('jd')
# compute flux and flux error
data = [mag2fluxcal_snana(*args) for args in zip(
magpsf[mask].explode(),
sigmapsf[mask].explode())]
flux, error = np.transpose(data)
# make a Pandas DataFrame with exploded series
pdf = pd.DataFrame.from_dict({
'SNID': df_tmp['SNID'],
'MJD': df_tmp['jd'],
'FLUXCAL': flux,
'FLUXCALERR': error,
'FLT': fid[mask].explode().replace({1: 'g', 2: 'r'})
})
# Load pcs
if npcs is not None:
npcs = int(npcs.values[0])
else:
npcs = 2
if pcs_path is not None:
pcs_path_ = pcs_path.values[0]
else:
curdir = os.path.dirname(os.path.abspath(__file__))
pcs_path_ = curdir + '/data/models/components.csv'
pcs = load_pcs(pcs_path_, npcs=npcs)
test_features = []
filters = ['g', 'r']
# extract features (all filters) for each ID
for id in np.unique(pdf['SNID']):
pdf_sub = pdf[pdf['SNID'] == id]
        pdf_sub = pdf_sub[pdf_sub['FLUXCAL'] == pdf_sub['FLUXCAL']]  # NaN != NaN, so this drops rows with missing flux
features = extract_all_filters_fink(
epoch_lim=epoch_lim, pcs=pcs,
time_bin=time_bin, filters=filters,
lc=pdf_sub, flux_lim=flux_lim)
test_features.append(features)
to_return_features = np.zeros(
(len(jd), len(get_features_name(npcs))),
dtype=float
)
to_return_features[mask] = test_features
concatenated_features = [
','.join(np.array(i, dtype=str)) for i in to_return_features
]
    return pd.Series(concatenated_features)
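# ---------------------------------------------------------------------------
# Minimal sketch (illustration only): how the per-alert lists above become a
# long-format light curve with one row per measurement. `explode` repeats the
# SNID for every element of the corresponding list, which is what later lets
# the feature extraction group measurements back by alert.
def _explode_lightcurve_sketch():
    import pandas as pd
    jd = pd.Series([[1.0, 2.0], [3.0, 4.0, 5.0]])   # two alerts, 2 and 3 epochs
    tmp = pd.DataFrame({'jd': jd, 'SNID': range(len(jd))}).explode('jd')
    return tmp                                      # 5 rows: SNID 0 twice, SNID 1 three times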
import pandas as pd
data_path = '../data/'
store_id_map = pd.read_csv(data_path + 'store_id_relation.csv').set_index('hpg_store_id',drop=False)
air_reserve = pd.read_csv(data_path + 'air_reserve.csv')
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import copy
import math
from shapely.geometry.polygon import Polygon
# A shared random state will ensure that data is split in a same way in both train and test function
RANDOM_STATE = 42
def load_tabular_features_hadoop(distribution='all', matched=False, scale='all', minus_one=False):
tabular_path = 'data/join_results/train/join_cardinality_data_points_sara.csv'
print(tabular_path)
    tabular_features_df = pd.read_csv(tabular_path, delimiter='\\s*,\\s*', header=0)
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
from sklearn.metrics import confusion_matrix
# # Problem 1 (K-means)
# In[2]:
pi = [0.2,0.5,0.3]
num_obs = 500
# In[3]:
mean = np.array([[0,0],[3,0],[0,3]])
cov = np.array([[1,0],[0,1]])
data= []
label = []
for _ in range(num_obs):
gaus_index = np.random.choice(3,p=pi)
label.append(gaus_index)
x,y = (np.random.multivariate_normal(mean[gaus_index], cov, 1).T)
data.append([x[0],y[0]])
data = np.array(data)
# In[5]:
scatter = plt.scatter(data[:,0],data[:,1],c=label)
plt.scatter(mean[:,0],mean[:,1],c="red")
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Original Distribution of points")
plt.show()
# In[6]:
def K_Means(data,K,num_iter=20,plot=False,show_values=False):
num_iter = num_iter
num_obs = len(data)
c = np.zeros(num_obs)
mu =np.array(random.sample(list(data),K))
if(show_values):
print("Initialized cluster centers are:")
print(mu)
if(plot):
plt.scatter(data[:,0],data[:,1],c=c)
plt.scatter(mu[:,0],mu[:,1],c="red")
plt.xlabel("X")
plt.ylabel("Y")
plt.suptitle("Distribution of points (colored by clusters)")
plt.title("(Initially assigning to one cluster)")
plt.show()
objective = []
for _ in range(num_iter):
for i in range(num_obs):
temp = [np.linalg.norm(data[i]-val)**2 for val in mu]
c[i] = (np.argmin(temp))
objective.append(compute_KMeans_Objective(data,c,mu))
for i in range(len(mu)):
temp = [data[index] for index in range(num_obs) if c[index] == i]
mu[i] = (np.mean(temp,axis=0))
objective.append(compute_KMeans_Objective(data,c,mu))
if(plot):
plt.scatter(data[:,0],data[:,1],c=c)
plt.scatter(mu[:,0],mu[:,1],c="red")
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Distribution of points (colored by clusters)")
plt.show()
if(show_values):
print("The learned cluster centers are:")
print(mu)
return [c,mu,objective]
# In[7]:
def compute_KMeans_Objective(d,labels,centers):
loss = 0
for i in range(len(d)):
for j in range(len(centers)):
if(labels[i]==j):
loss+=np.linalg.norm(data[i]-centers[j])**2
return loss
# In[8]:
Ks = [2,3,4,5]
Cs = []
MUs = []
OBJs = []
for k in Ks:
plot= k == 3 or k==5
c,mu,obj = K_Means(data,k,num_iter=20,plot=plot)
Cs.append(c)
MUs.append(mu)
OBJs.append(obj)
# In[9]:
for i in range(len(OBJs)):
obj = OBJs[i]
obj1 = [obj[i] for i in range(len(obj)) if i%2==0]
obj2 = [obj[i] for i in range(len(obj)) if i%2!=0]
plt.plot([x * .5 for x in range(1,41)],obj, color ="green")
plt.plot([x * .5 for x in range(1,41,2)],obj1,"o",color="blue",mfc='none')
plt.plot([x * .5 for x in range(2,41,2)],obj2,"o",color="red",mfc='none')
plt.xticks(range(0,21))
plt.xlabel("Number of Iterations")
plt.ylabel("Objective Function")
plt.title("Value of the Objective Function for K-Means for K = " + str(Ks[i]))
plt.show()
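# In[ ]:
# Optional sketch (not required by the assignment): the assignment step of
# K-Means above can be vectorized with NumPy broadcasting instead of the
# per-point loop; it computes c[i] = argmin_k ||x_i - mu_k||^2 for all points
# at once and gives the same labels.
def vectorized_assignment(points, centers):
    # distances has shape (num_obs, K): squared distance of each point to each center
    distances = ((points[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
    return np.argmin(distances, axis=1)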
# # Problem 2 (Bayes classifier revisited)
# In[3]:
X_train = pd.read_csv("Prob2_Xtrain.csv",header=None).values
X_test = pd.read_csv("Prob2_Xtest.csv",header=None).values
y_train = pd.read_csv("Prob2_ytrain.csv",header=None).values
y_test = pd.read_csv("Prob2_ytest.csv",header=None).values
# In[4]:
y_train = np.array([y_train[i][0] for i in range(len(y_train))])
y_test = np.array([y_test[i][0] for i in range(len(y_test))])
# In[5]:
X_train_0 = X_train[y_train == 0]
X_train_1 = X_train[y_train == 1]
# In[6]:
data = [X_train_0,X_train_1]
# In[7]:
def Naive_Bayes(data, pi, mu , sigma, class_priors,num_classes=2):
y_pred = np.zeros(len(data))
K = len(pi[0])
for i in range(len(data)):
prob = np.zeros(num_classes)
class_index = range(num_classes)
for index in class_index:
class_cond_prob = 0
for k in range(K):
N = multivariate_normal.pdf(data[i],mean=mu[index][k],cov=sigma[index][k])
class_cond_prob+=((pi[index][k])*N)
prob[index] = class_cond_prob
label = np.argmax(prob)
y_pred[i] = label
return y_pred
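# Note (observation, not a change to the results above): `class_priors` is
# passed into Naive_Bayes but never folded into `prob`; the argmax is taken on
# the class-conditional mixture likelihoods only. For imbalanced classes the
# Bayes decision would multiply each likelihood by its class prior first,
# e.g. prob[index] = class_priors[index] * class_cond_prob.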
# In[8]:
def EM_GMM(data,k = 3,num_iter = 30,num_run = 10,compute_objective=True):
num_obs = len(data)
Objectives = []
best_phi = np.zeros((num_obs,k))
best_pi = np.full((k,1),1/k)
best_mu = np.random.multivariate_normal(np.mean(data,axis=0), np.cov(data.T), k)
best_Sigma = [np.cov(data.T)] * k
best_objective=-1
for run in range(num_run):
phi = np.zeros((num_obs,k))
pi = np.full((k,1),1/k)
mu = np.random.multivariate_normal(np.mean(data,axis=0), np.cov(data.T), k)
Sigma = np.full((k,data[0].shape[0],data[0].shape[0]),np.cov(data.T))
print("starting run: " + str(run))
objective = []
for _ in range(num_iter):
for i in range(num_obs):
for j in range(k):
phi[i][j] = (pi[j] * multivariate_normal.pdf(data[i],mean=mu[j],cov=Sigma[j],allow_singular=True))
denominator = sum(phi[i])
phi[i] = (phi[i]/denominator)
nk = np.sum(phi,axis=0)
pi = (nk/num_obs)
numerator_mu = np.zeros((k,data[0].shape[0]))
numerator_Sigma = np.zeros((k,data[0].shape[0],data[0].shape[0]))
for i in range(k):
for j in range(num_obs):
                    numerator_mu[i] += (phi[j][i] * data[j])  # weight observation j by its responsibility for component i
mu[i] = numerator_mu[i] / nk[i]
for j in range(num_obs):
temp = (data[j] - mu[i]).reshape(data[j].shape[0],1)
numerator_Sigma[i] += (phi[j][i] * np.matmul(temp,temp.T))
Sigma[i] = numerator_Sigma[i] / nk[i]
if compute_objective:
L = 0
log_pi = np.where(pi > np.exp(-20), np.log(pi), -20)
for i in range(num_obs):
for j in range(k):
M = multivariate_normal.pdf(data[i],mean=mu[j],cov=Sigma[j],allow_singular=True)
if(M<np.exp(-20)):
log_M = -20
else:
log_M = np.log(M)
N = log_pi[j]
L+=(phi[i][j]*(N + log_M))
objective.append(L)
if compute_objective:
print("Objective value for " + str(run) + " run is: " + str(objective[-1]))
Objectives.append(objective)
if(objective[-1]>=best_objective):
best_pi=pi
best_mu=mu
best_Sigma=Sigma
best_phi=phi
best_objective=objective[-1]
print("best objective for this run is: " + str(best_objective))
return [Objectives,best_mu,best_pi,best_Sigma,best_phi]
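# In[ ]:
# Optional sketch: the E-step above normalizes raw Gaussian densities, which
# can underflow for points far from every component (the exp(-20) clipping in
# the objective works around the same issue). A log-sum-exp formulation for a
# single observation avoids the underflow entirely.
def responsibilities_logsumexp(x, pi, mu, Sigma):
    log_w = np.array([np.log(pi[j]) + multivariate_normal.logpdf(
        x, mean=mu[j], cov=Sigma[j], allow_singular=True) for j in range(len(pi))])
    log_norm = np.logaddexp.reduce(log_w)   # log(sum_j exp(log_w_j)), computed stably
    return np.exp(log_w - log_norm)         # responsibilities for x, they sum to 1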
# In[9]:
num_class = 2
class_priors = np.zeros(num_class)
for i in range(num_class):
class_priors[i] = len(data[i])
class_priors /= (np.sum(class_priors))
# In[9]:
print("Starting EM for class 0")
EM0 = EM_GMM(data[0],k = 3,num_iter = 30,num_run = 10,compute_objective=True)
# In[10]:
print("Starting EM for class 1")
EM1 = EM_GMM(data[1],k = 3,num_iter = 30,num_run = 10,compute_objective=True)
EM = [EM0,EM1]
# In[12]:
for num in range(num_class):
plt.figure(figsize=(7,7))
for i in range(len(EM[num][0])):
plt.plot(range(5,31),EM[num][0][i][4:],label=str(i+1))
plt.xlabel("Number of iterations")
plt.ylabel("Log Joint Likelihood ")
plt.suptitle("For Class: " + str(num))
plt.title("Log marginal objective function for a 3-Gaussian mixture model over 10 different runs and for iterations 5 to 30 ")
plt.legend()
plt.show()
# In[13]:
MU = np.array([EM[0][1],EM[1][1]])
PI = np.array([EM[0][2],EM[1][2]])
SIGMA = np.array([EM[0][3],EM[1][3]])
predictions = Naive_Bayes(data = X_test,
pi = PI,
mu = MU,
sigma = SIGMA,
class_priors = class_priors,
num_classes = num_class)
conf_mat = confusion_matrix(y_true = y_test, y_pred = predictions)
print("The results for 3- Gaussian Mixture Model")
print(pd.DataFrame(conf_mat))
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Preprocess ieee-fraud-detection dataset.
(https://www.kaggle.com/c/ieee-fraud-detection).
Train shape:(590540,394),identity(144233,41)--isFraud 3.5%
Test shape:(506691,393),identity(141907,41)
############### TF Version: 1.13.1/Python Version: 3.7 ###############
"""
import os
import random
import warnings
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
warnings.filterwarnings('ignore')
# make all processes deterministic / fix the seed of the random number generators
# os.environ maps environment variable names to values; PYTHONHASHSEED is one of them
# by default Python salts the hash of str/bytes/datetime objects with a random seed;
# if this environment variable is set to a number, that number is used as a fixed seed instead
def set_seed(seed=0):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
# reduce dataframe memory usage by downcasting numeric columns to smaller dtypes
def reduce_mem_usage(df, verbose=True):
numerics = ["int16", "int32", "int64", "float16", "float32", "float64"]
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == "int":
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
reduction = 100*(start_mem-end_mem)/start_mem
if verbose:
print("Default Mem. {:.2f} Mb, Optimized Mem. {:.2f} Mb, Reduction {:.1f}%".
format(start_mem, end_mem, reduction))
return df
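# usage sketch (illustration only, synthetic data): the downcasting helper is
# meant to be applied right after reading each CSV; note that float64->float16
# loses precision, so the saving is a trade-off rather than a free win
def _reduce_mem_demo():
    demo = pd.DataFrame({"a": np.arange(1000, dtype=np.int64),
                         "b": np.random.rand(1000)})
    return reduce_mem_usage(demo, verbose=True)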
# [train+infer] encode categorical features (NaN values are left unencoded)
def minify_identity_df(df):
df['id_12'] = df['id_12'].map({'Found': 1, 'NotFound': 0})
df['id_15'] = df['id_15'].map({'New': 2, 'Found': 1, 'Unknown': 0})
df['id_16'] = df['id_16'].map({'Found': 1, 'NotFound': 0})
df['id_23'] = df['id_23'].map({'IP_PROXY:TRANSPARENT': 3, 'IP_PROXY:ANONYMOUS': 2, 'IP_PROXY:HIDDEN': 1})
df['id_27'] = df['id_27'].map({'Found': 1, 'NotFound': 0})
df['id_28'] = df['id_28'].map({'New': 2, 'Found': 1})
df['id_29'] = df['id_29'].map({'Found': 1, 'NotFound': 0})
df['id_35'] = df['id_35'].map({'T': 1, 'F': 0})
df['id_36'] = df['id_36'].map({'T': 1, 'F': 0})
df['id_37'] = df['id_37'].map({'T': 1, 'F': 0})
df['id_38'] = df['id_38'].map({'T': 1, 'F': 0})
df['id_34'] = df['id_34'].fillna(':3')
df['id_34'] = df['id_34'].apply(lambda x: x.split(':')[1]).astype(np.int8)
df['id_34'] = np.where(df['id_34'] == 3, np.nan, df['id_34'])
df['id_33'] = df['id_33'].fillna('0x0')
df['id_33_0'] = df['id_33'].apply(lambda x: x.split('x')[0]).astype(int)
df['id_33_1'] = df['id_33'].apply(lambda x: x.split('x')[1]).astype(int)
df['id_33'] = np.where(df['id_33'] == '0x0', np.nan, df['id_33'])
    df['DeviceType'] = df['DeviceType'].map({'desktop': 1, 'mobile': 0})
return df
if __name__ == "__main__":
print("========== 1.Set random seed ...")
SEED = 42
set_seed(SEED)
LOCAL_TEST = False
print("========== 2.Load csv data ...")
dir_data_csv = os.getcwd() + "\\ieee-fraud-detection\\"
    train_tran = pd.read_csv(dir_data_csv + "\\train_transaction.csv")
"""A utility class to summarize all results in a directory.
"""
__author__ = '<NAME>'
from dataclasses import dataclass, field
from pathlib import Path
import logging
import pandas as pd
from zensols.util.time import time
from zensols.deeplearn import DatasetSplitType
from . import (
ModelResult, DatasetResult, ModelResultManager, ArchivedResult,
PredictionsDataFrameFactory,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelResultReporter(object):
"""Summarize all results in a directory from the output of model execution from
:class:`~zensols.deeplearn.model.ModelExectuor`.
The class iterates through the pickled binary output files from the run and
summarizes in a Pandas dataframe, which is handy for reporting in papers.
"""
METRIC_DESCRIPTIONS = PredictionsDataFrameFactory.METRIC_DESCRIPTIONS
"""Dictionary of performance metrics column names to human readable
descriptions.
"""
result_manager: ModelResultManager = field()
"""Contains the results to report on--and specifically the path to directory
where the results were persisted.
"""
include_validation: bool = field(default=True)
"""Whether or not to include validation performance metrics."""
@property
def dataframe(self) -> pd.DataFrame:
"""Return the summarized results (see class docs).
:return: the Pandas dataframe of the results
"""
rows = []
cols = 'name file start train_duration converged features '.split()
if self.include_validation:
cols.extend('wF1v wPv wRv mF1v mPv mRv MF1v MPv MRv '.split())
cols.extend(('wF1t wPt wRt mF1t mPt mRt MF1t MPt MRt ' +
'train_occurs validation_occurs test_occurs').split())
dpt_key = 'n_total_data_points'
arch_res: ArchivedResult
for fname, arch_res in self.result_manager.results_stash.items():
res: ModelResult = arch_res.model_result
train: DatasetResult = res.dataset_result.get(DatasetSplitType.train)
validate: DatasetResult = res.dataset_result.get(DatasetSplitType.validation)
test: DatasetResult = res.dataset_result.get(DatasetSplitType.test)
if train is not None:
dur = train.end_time - train.start_time
hours, remainder = divmod(dur.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
dur = f'{hours:02}:{minutes:02}:{seconds:02}'
if validate is not None:
conv_epoch = validate.statistics['n_epoch_converged']
else:
conv_epoch = None
if test is not None:
vm = validate.metrics
tm = test.metrics
features = ', '.join(res.decoded_attributes)
row = [res.name, fname, train.start_time, dur, conv_epoch, features]
if self.include_validation:
row.extend([
vm.weighted.f1, vm.weighted.precision, vm.weighted.recall,
vm.micro.f1, vm.micro.precision, vm.micro.recall,
vm.macro.f1, vm.macro.precision, vm.macro.recall])
row.extend([
tm.weighted.f1, tm.weighted.precision, tm.weighted.recall,
tm.micro.f1, tm.micro.precision, tm.micro.recall,
tm.macro.f1, tm.macro.precision, tm.macro.recall,
train.statistics[dpt_key], validate.statistics[dpt_key],
test.statistics[dpt_key]])
rows.append(row)
if logger.isEnabledFor(logging.INFO):
logger.info('result calculation complete for ' +
f'{res.name} ({fname})')
        return pd.DataFrame(rows, columns=cols)
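    # Column-name sketch (inferred from the code above, not from the library
    # docs): metric columns are prefixed w/m/M for weighted/micro/macro
    # averaging and suffixed v/t for the validation/test split, so 'wF1v' is
    # the weighted F1 on validation and 'MRt' the macro recall on test.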
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
__author__ = '<NAME>'
__contact__ = '<EMAIL>'
__copyright__ = '(c) <NAME>'
__license__ = 'MIT'
''' This routine compiles sentinel data (preferably cropped) and
UAV high resolution class rasters and creates tensors suitable for DNN work.
saves a tensor and label vector in npy format
'''
###############################################################################
""" Libraries"""
import pandas as pd
import numpy as np
from skimage import io
import rasterio
#############################################################
"""Inputs"""
#############################################################
SiteList = 'EMPTY'#this has the lists of sites with name, month and year
DatFolder = 'EMPTY' #location of above
#tile size
size=7
#Output location
Outfile = 'EMPTY' #no extensions needed, added later
###############################################################################
'''Functions'''
def map2pix(rasterfile,xmap,ymap):
with rasterio.open(rasterfile) as map_layer:
coords2pix = map_layer.index(xmap,ymap)
return coords2pix
def pix2map(rasterfile,xpix,ypix):
with rasterio.open(rasterfile) as map_layer:
pix2coords = map_layer.xy(xpix,ypix)
return pix2coords
def GetCrispClass(CLS, UL, LR):
ClassOut=np.zeros((1,1,3))
Spot = CLS[UL[0]:LR[0], UL[1]:LR[1]]#
c,counts = np.unique(Spot, return_counts=True)
if len(c)>0:
if (np.min(c)>0):#no UAV class pixels as no data. 10x10m area of S2 pixel is 100% classified
if np.max(counts)>=(0.95*np.sum(counts)):#pure class
ClassOut[0,0,2]=c[np.argmax(counts)]
if np.max(counts)>=(0.5*np.sum(counts)):#majority class
ClassOut[0,0,1]=c[np.argmax(counts)]
if np.max(counts)>(np.sum(counts)/3):#relative majority class, assumes a 3 class problem
ClassOut[0,0,0]=c[np.argmax(counts)]
else:
ClassOut[0,0,0] = -1 #this flags a spot with no data
else:
ClassOut[0,0,0] = -1 #this flags a spot with no data
return ClassOut
def MakeCrispClass(S2Name, UAVClassName, CLS_UAV):
S2 = io.imread(S2Name)
w = S2.shape[0]
h = S2.shape[1]
CrispClass = np.zeros((w,h,3))
for W in range(w):
for H in range(h):
S2coords = pix2map(S2Name, W,H)
UL = map2pix(UAVClassName, S2coords[0]-5, S2coords[1]+5)
LR = map2pix(UAVClassName, S2coords[0]+5, S2coords[1]-5)
CrispClass[W,H,:] = GetCrispClass(CLS_UAV, UL, LR)
return CrispClass
def slide_rasters_to_tiles(im, CLS, size):
h=im.shape[0]
w=im.shape[1]
di=im.shape[2]
try:
dc =CLS.shape[2]
LabelTensor = np.zeros(((h-size)*(w-size), size,size,dc))
except:#case with desk-based polygons having 2D labels
dc=1
LabelTensor = np.zeros(((h-size)*(w-size), size,size,dc))
TileTensor = np.zeros(((h-size)*(w-size), size,size,di))
B=0
for y in range(0, h-size):
for x in range(0, w-size):
#print(str(x)+' '+str(y))
if dc>1:
LabelTensor[B,:,:,:] = CLS[y:y+size,x:x+size,:]#.reshape(size,size,dc)
else:
LabelTensor[B,:,:,0] = CLS[y:y+size,x:x+size]#.reshape(size,size,1)
TileTensor[B,:,:,:] = im[y:y+size,x:x+size,:]#.reshape(size,size,di)
B+=1
return TileTensor, LabelTensor
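# Shape sketch (illustration only): for an image of h x w pixels with di bands
# the function above returns (h-size)*(w-size) tiles of shape size x size x di,
# with matching label tiles. A tiny synthetic check:
def _tile_shapes_demo():
    im = np.zeros((20, 15, 12))
    cls = np.zeros((20, 15, 3))
    tiles, labels = slide_rasters_to_tiles(im, cls, size=7)
    return tiles.shape, labels.shape   # ((13*8, 7, 7, 12), (13*8, 7, 7, 3))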
############################################################################################
'''Main processing'''
#load the site list
SiteDF = pd.read_csv(SiteList)
#Tile size
if size%2 != 0:
middle=size//2
else:
raise Exception('Tile size of '+str(size)+ ' is even and not valid. Please choose an odd tile size')
#initialise the main outputs with Relative majority, Majority and Pure class and Poygon class cases
MasterLabelDict = {'RelMajClass':0,'MajClass':0,'PureClass':0,'PolyClass':0,'Month':0,'Year':0,'Site':'none'}
MasterLabelDF = pd.DataFrame(data=MasterLabelDict, index=[0])
MasterTensor = np.zeros((1,size,size,12))
''''Pass 1: UAV classes'''
##run through the sites in the DF and extract the data
for s in range(len(SiteDF.Site)):
print('Processing UAV classes '+SiteDF.Site[s]+' '+str(SiteDF.Month[s])+' '+str(SiteDF.Year[s]))
# Getting the data
S2Image = DatFolder+SiteDF.Abbrev[s]+'_'+str(SiteDF.Month[s])+'_'+str(SiteDF.Year[s])+'_S2.tif'
Isubset=io.imread(S2Image)
#get both UAV class and S2 class and produce the fuzzy classification on the S2 image dimensions
ClassUAVName = DatFolder+SiteDF.Abbrev[s]+'_'+str(SiteDF.Month[s])+'_'+str(SiteDF.Year[s])+'_UAVCLS.tif'
ClassUAV = io.imread(ClassUAVName)
ClassUAV[ClassUAV<1] = 0 #catch no data <1 but not 0 cases
ClassUAV[ClassUAV>3] = 0 #filter other classes and cases where 255 is the no data value
Ccrisp1 = MakeCrispClass(S2Image, ClassUAVName, ClassUAV)
Ti, Tl = slide_rasters_to_tiles(Isubset, Ccrisp1, size)
labels = np.zeros((Tl.shape[0],7))
    LabelDF = pd.DataFrame(data=labels, columns=['RelMajClass','MajClass','PureClass','PolyClass','Month','Year','Site'])
import csv
import os
from multiprocessing import Pool, cpu_count
import numpy as np
import pandas as pd
# these explain which values we are reading, we may want to add more later or something
data_descriptor = {
'Open': 'open.csv',
'High': 'high.csv',
'Low': 'low.csv',
'Close': 'close.csv',
'Adjusted Close': 'adj_close.csv',
'Volatility': 'vol.csv'
}
IMAGE_SEED_KEYS = list(data_descriptor.keys())[0:2]
# sets the reference folder
DATA_FOLDER = os.path.abspath('../data/processed/')
OUTPUT_FOLDER = os.path.abspath('../data/concat/')
DATA_LEN_FILE = os.path.abspath("../data/processed/aapl/close.csv")
BLOCK_SIZE = 1000 # Each data file is chunked into smaller pieces for efficient working
def load_symbols():
    symbols = os.listdir(DATA_FOLDER)
    try:
        symbols.remove('.DS_Store')  # this is a weird macosx thing; list.remove mutates in place
    except Exception as e:
        print('Could not remove .DS_Store', e)
    return symbols
# symbols = ('goog',)
def construct_path_to_descriptor(symbol, descriptor):
return os.path.join(DATA_FOLDER, symbol, descriptor)
def resource_heavy_way(symbol):
if os.path.exists(OUTPUT_FOLDER) == False:
os.mkdir(OUTPUT_FOLDER)
# needed later
data = {}
print('Loading data for:', symbol)
# read the data
for descriptor in data_descriptor:
path = construct_path_to_descriptor(symbol, data_descriptor[descriptor])
        df = pd.read_csv(path)
import os.path
import sys
import pandas as pd
import numpy as np
from utils import *
from collegeutils import *
master = pd.DataFrame()
def main():
global master
if not(os.path.isfile('master.csv')):
add_all_college_basketball_prospects()
add_rsci_rank_as_column()
while True:
stop_script = input("We completed Step 2 - do you want to pause the script to do some manual data cleanup? It is recommended. Enter 'yes' or 'no': ").strip()
if (stop_script == 'yes'):
print("Okay, we recommend fixing all of the names and adding missing RSCI ranks from 247sports.")
export_master(master)
sys.exit("Exiting the program to do manual data cleanup.")
elif (stop_script == 'no'):
print("Okay! Continuing with the script.")
break
else:
print("ERROR - That is not a valid input. Please try again.")
else:
while True:
continue_script = input("Seems like you already have a master.csv file. Do you want to pick up from step 3? Enter 'yes' or 'no': ").strip()
if (continue_script == 'yes'):
print("Okay, picking up from Step 3 - adding college stats from Basketball Reference.")
master = pd.read_csv('master.csv')
break
elif (continue_script == 'no'):
print("Okay, exiting the program. You can delete the master.csv file if you want to try again.")
sys.exit("Exiting the program.")
else:
print("ERROR - That is not a valid input. Please try again.")
add_college_stats_from_basketball_reference()
export_master(master)
def add_all_college_basketball_prospects():
"""Get the top 100 prospects each year from NBADraft.net, and add each year's top 100 to a master DataFrame.
Found NBADraft.net to be the simplest to scrape and the most consistent from year-to-year,
their rankings are generally questionable but I'm dropping their rankings anyway.
"""
global master
print("----------------------------------")
print("STEP 1 - Getting the names of all the prospects")
print("----------------------------------")
year_counter = FIRST_YEAR_OF_DRAFT_RANKINGS
while year_counter <= get_current_year():
top100 = []
season = get_season_from_year(year_counter)
print("Getting players from the " + season + " season")
soup_html = find_site("https://www.nbadraft.net/ranking/bigboard/?year-ranking=" + str(year_counter))
if (soup_html):
players = soup_html.find('tbody').findChildren('tr')
for player in players:
stats = player.find_all('td')
index = INDEX_SEPERATING_FIRST_AND_LAST_NAME
row = []
while index < len(stats):
stat_text = stats[index].getText()
if (index == INDEX_SEPERATING_FIRST_AND_LAST_NAME):
stat_text = " ".join(name.text for name in stats[index].findChildren('span')) # Example of list comprehension
row.append(stat_text)
index = index + 1
row.insert(0, season)
top100.append(row)
            yearDataFrame = pd.DataFrame(top100, columns=['Season', 'Name', 'Height', 'Weight', 'Position', 'School', 'Class'])
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
from contextlib import nullcontext
import glob
import json
import numpy as np
import pandas
import pytest
import modin.experimental.pandas as pd
from modin.config import Engine
from modin.utils import get_current_execution
from modin.pandas.test.utils import (
df_equals,
get_unique_filename,
teardown_test_files,
test_data,
)
from modin.test.test_utils import warns_that_defaulting_to_pandas
from modin.pandas.test.utils import parse_dates_values_by_id, time_parsing_csv_path
@pytest.mark.skipif(
Engine.get() == "Dask",
reason="Dask does not have experimental API",
)
def test_from_sql_distributed(make_sql_connection): # noqa: F811
if Engine.get() == "Ray":
filename = "test_from_sql_distributed.db"
table = "test_from_sql_distributed"
conn = make_sql_connection(filename, table)
query = "select * from {0}".format(table)
        pandas_df = pandas.read_sql(query, conn)
# Copyright (c) 2013, greycube and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import add_days, add_months
import pandas
def execute(filters=None):
return get_data(filters)
def get_data(filters):
where_clause = []
if filters.get("employee"):
where_clause += [" ec.employee = %(employee)s"]
# if filters.get("month"):
# filters["from_date"] = add_days(add_months(filters.get("from_date"), -1), 16)
# filters["to_date"] = add_days(filters.get("from_date"), 15)
where_clause += ["ec.posting_date BETWEEN %(from_date)s and %(to_date)s"]
where_clause = where_clause and " where " + " and ".join(where_clause) or ""
data = frappe.db.sql(
"""
select %(from_date)s + INTERVAL seq DAY `posting_date`,
ec.name `claim`, ecd.amount,
ecd.expense_type,
coalesce(ecd.description,'') description,
coalesce(ecd.town_worked_cf, '') town_worked_cf,
coalesce(ecd.travel_from_cf, '') travel_from_cf,
coalesce(ecd.travel_to_cf, '') travel_to_cf
FROM seq_0_to_31
left outer join `tabExpense Claim` ec on ec.posting_date = %(from_date)s + INTERVAL seq DAY
left outer join `tabExpense Claim Detail` ecd on ecd.parent = ec.name
{where_clause}
order by seq, claim""".format(
where_clause=where_clause
),
filters,
as_dict=True,
debug=0,
)
if not data:
return [], []
df = | pandas.DataFrame.from_records(data) | pandas.DataFrame.from_records |
import os
import numpy as np
import pandas as pd
import scipy.special as sc_special
from surf.basic_mlp import *
import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import GridSearchCV
from collections import deque
import time
import codecs
import random
pd.set_option('display.max_columns', None)
#!/usr/bin/env python
# coding: utf-8
import sys
import os, os.path
import requests
import time
import pandas as pd
import asyncio
from aiohttp import ClientSession
from bs4 import BeautifulSoup
from multiprocessing.pool import ThreadPool
METADATA_CSV = os.path.join(os.getcwd(), 'data/raw/jobs/jobs_metadata.csv')
OUTPUT_CSV = os.path.join(os.getcwd(), 'data/cleaned/jobs/all.csv')
JOB_STAT_KEYS = [
'countSolarJobs',
'countWindJobs',
'countEnergyJobs',
'totalJobs',
'percentOfStateJobs',
'residentialMWhInvested',
'commercialMWhInvested',
'utilityMWhInvested',
'totalMWhInvested',
'residentialDollarsInvested',
'commercialDollarsInvested',
'utilityDollarsInvested',
'totalDollarsInvested',
'investmentHomesEquivalent',
'countResidentialInstallations',
'countCommercialInstallations',
'countUtilityInstallations',
'countTotalInstallations',
'residentialMWCapacity',
'commercialMWCapacity',
'utilityMWCapacity',
'totalMWCapacity'
]
CSV_KEYS = [
'stateAbbr',
'geoType',
'name',
'geoid',
'sourceURL'
]
CSV_KEYS.extend(JOB_STAT_KEYS)
HTML_STRUCTURE = {
'tables': [
['countSolarJobs', 'countWindJobs', 'countEnergyJobs'],
['residentialDollarsInvested', 'residentialMWhInvested', 'commercialDollarsInvested', 'commercialMWhInvested', 'utilityDollarsInvested', 'utilityMWhInvested'],
['countResidentialInstallations', 'residentialMWCapacity', 'countCommercialInstallations', 'commercialMWCapacity', 'countUtilityInstallations', 'utilityMWCapacity'],
],
'totals': [
['totalJobs', 'percentOfStateJobs'],
['totalDollarsInvested', 'totalMWhInvested', 'investmentHomesEquivalent'],
['countTotalInstallations', 'totalMWCapacity']
]
}
REGION_TYPES = [
('state', 'State'),
('county', 'County'),
('sldu', 'State Senate District'),
('sldl', 'State House District'),
('cd', 'Congressional District')]
def scrape(metadata, attempt=1):
url = metadata['html_url']
_idx = metadata['_idx']
with requests.get(url) as response:
row = {
'stateAbbr': metadata['state_abbr'],
'geoid': metadata['geoid'],
'geoType': metadata['region_type'],
'name': metadata['name'],
'sourceURL': metadata['html_url'],
}
unique_key = url.replace('http://assessor.keva.la/cleanenergyprogress', '')
if attempt > 3:
print("%d: [%d/3] – %s – FAIL – %s" % (_idx, attempt, response.status_code, unique_key))
return None
if response.status_code >= 400:
print("%d: [%d/3] – %s – RETRY – %s" % (_idx, attempt, response.status_code, unique_key))
time.sleep(3)
return scrape(metadata, attempt + 1)
html = response.text
soup = BeautifulSoup(html, 'html5lib')
row['name'] = soup.find('span', id='geography__name').text.strip()
outer_divs = soup.find_all('div', class_='analytics_data')
for keylist, outerdiv in zip(HTML_STRUCTURE['tables'], outer_divs):
tds = outerdiv.find_all('td', class_='table_data')
values = [elem.text.strip() for elem in tds[:len(keylist)]]
for idx, key in enumerate(keylist):
row[key] = values[idx]
li_buckets = soup.find_all('li', class_=None)
if len(li_buckets) != 3:
print("%d: [%d/3] – %s – PARSE – %s" % (_idx, attempt, response.status_code, unique_key))
print("li_buckets:", li_buckets)
print(html)
raise ValueError
for keylist, outerli in zip(HTML_STRUCTURE['totals'], li_buckets):
total_spans = outerli.find_all('span', class_='analytics_total_num')
totals = [elem.text.strip() for elem in total_spans]
if metadata['region_type'] == 'state' and keylist[-1] == 'percentOfStateJobs':
keylist = keylist [:-1]
if len(totals) == 0:
for key in keylist:
row[key] = 0
elif len(totals) != len(keylist):
print("%d: [%d/3] – %s – PARSE – %s" % (_idx, attempt, response.status_code, unique_key))
print("totals:", totals, keylist)
print(html)
raise ValueError
else:
for idx, key in enumerate(keylist):
row[key] = totals[idx]
print("%d: [%d/3] – %s – OK – %s" % (_idx, attempt, response.status_code, unique_key))
return row
def scrape_jobs_data():
jobs_data = None
if os.path.exists(OUTPUT_CSV):
jobs_data = pd.read_csv(OUTPUT_CSV, encoding='ISO-8859-1')
else:
        jobs_data = pd.DataFrame(columns=CSV_KEYS)
import os
os.chdir('../')
import numpy as np
import pandas as pd
from src.d01_data.dengue_data_api import DengueDataApi
from src.d04_modeling.poisson_hmm import PoissonHMM
from src.d04_modeling.dynamic_factor_model import DynamicFactorModel
from src.d04_modeling.arx import ARX
dda = DengueDataApi()
x_train, x_validate, y_train, y_validate = dda.split_data(random=False)
cities = y_train.index.get_level_values('city').unique()
model_evaluation = pd.DataFrame(index=pd.Index([]), columns=['run_static', 'city_dummy', 'no_glm',
'num_states', 'city', 'lls', 'forecast_mae',
'in_mae', 'out_mae'])
for run_static in [True]:
for city_dummy in [False]:
for no_glm in [True, False]:
for num_states in [2, 3, 4]:
if no_glm:
if not run_static:
continue
print(run_static, city_dummy, no_glm, num_states)
num_components = 4 if run_static else 3
results = {'run_static': run_static,
'city_dummy': city_dummy,
'no_glm': no_glm,
'num_states': num_states}
if run_static:
num_components = 4
z_train, z_validate, pct_var = dda.get_pca(x_train.copy(), x_validate.copy(), num_components=num_components)
z_train['bias'] = 1.
z_validate['bias'] = 1.
else:
dfm_model = DynamicFactorModel(x_train.copy(), y_train.copy(), factors=num_components, factor_orders=1, idiosyncratic_ar1=True)
dfm_model.fit()
z_train, y_train = dfm_model.get_filtered_factors(x_train.copy(), y_train.copy())
z_validate, y_validate = dfm_model.get_filtered_factors(x_validate.copy(), y_validate.copy())
arx_model = ARX(x_train=z_train.copy(), y_train=y_train.copy(), p={'iq': 2, 'sj': 3}, d=1)
arx_model.fit()
y_train_hat = []
y_validate_hat = []
for city in arx_model.get_cities():
# print(city)
y_train_hat += [arx_model.predict(city, z_train.copy())]
y_validate_hat += [arx_model.predict(city, z_validate.copy())]
y_train_hat = pd.concat(y_train_hat, axis=0, ignore_index=True)
                y_validate_hat = pd.concat(y_validate_hat, axis=0, ignore_index=True)
import numbers
from typing import Optional, List, Tuple, Dict
import pathlib
import numpy as np
import pandas as pd
from .input_error_check import ErrorCheck, InputFileError
class RailWheelInput():
def __init__(self) -> None:
"""Parent class for rail/wheel standard materials and geometries.
"""
self.wheel: pd.DataFrame
self.rail: pd.DataFrame
# self.loaded = None
self.error_report: List[str] = []
def read(self, filename: pathlib.Path, sheetname_rail: str, sheetname_wheel: str) -> None:
# only abstract method
self.wheel = pd.read_excel(filename, sheet_name=sheetname_wheel, index_col=0)
        self.rail = pd.read_excel(filename, sheet_name=sheetname_rail, index_col=0)
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2019 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
try:
import talib
except:
print('PLEASE install TALIB to call these methods')
import pandas as pd
def CMO(Series, timeperiod=14):
res = talib.CMO(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
def BBANDS(Series, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0):
up, middle, low = talib.BBANDS(
Series.values, timeperiod, nbdevup, nbdevdn, matype)
return pd.Series(up, index=Series.index), pd.Series(middle, index=Series.index), pd.Series(low, index=Series.index)
def BETA(SeriesA, SeriesB, timeperiod=5):
res = talib.BETA(SeriesA.values, SeriesB.values, timeperiod)
return pd.Series(res, index=SeriesA.index)
def CORREL(SeriesA, SeriesB, timeperiod=5):
    res = talib.CORREL(SeriesA.values, SeriesB.values, timeperiod)
return pd.Series(res, index=SeriesA.index)
def DEMA(Series, timeperiod=30):
res = talib.DEMA(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
def EMA(Series, timeperiod=30):
res = talib.EMA(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
def HT_DCPERIOD(Series):
res = talib.HT_DCPERIOD(Series.values)
return pd.Series(res, index=Series.index)
def HT_DCPHASE(Series):
res = talib.HT_DCPHASE(Series.values)
return pd.Series(res, index=Series.index)
def HT_PHASOR(Series):
    inphase, quadrature = talib.HT_PHASOR(Series.values)
    return pd.Series(inphase, index=Series.index), pd.Series(quadrature, index=Series.index)
def HT_SINE(Series):
    sine, leadsine = talib.HT_SINE(Series.values)
    return pd.Series(sine, index=Series.index), pd.Series(leadsine, index=Series.index)
def HT_TRENDLINE(Series):
res = talib.HT_TRENDLINE(Series.values)
return pd.Series(res, index=Series.index)
def HT_TRENDMODE(Series):
res = talib.HT_TRENDMODE(Series.values)
return pd.Series(res, index=Series.index)
def KAMA(Series, timeperiod=30):
res = talib.KAMA(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
def LINEARREG(Series, timeperiod=14):
res = talib.LINEARREG(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
def LINEARREG_ANGLE(Series, timeperiod=14):
res = talib.LINEARREG_ANGLE(Series.values, timeperiod)
    return pd.Series(res, index=Series.index)
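def _talib_wrapper_demo():
    # usage sketch (illustration only): every wrapper above takes a pandas
    # Series of prices and returns Series aligned on the same index, so the
    # results can be joined straight back onto the source DataFrame
    import numpy as np
    close = pd.Series(np.random.rand(200).cumsum() + 100)
    upper, middle, lower = BBANDS(close, timeperiod=20)
    return pd.concat({'close': close, 'ema30': EMA(close, 30),
                      'bb_up': upper, 'bb_mid': middle, 'bb_low': lower}, axis=1)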
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper for GroupedData to behave similar to pandas GroupBy.
"""
from abc import ABCMeta, abstractmethod
import sys
import inspect
from collections import OrderedDict, namedtuple
from distutils.version import LooseVersion
from functools import partial
from itertools import product
from typing import (
Any,
Callable,
Dict,
Generic,
Iterator,
Mapping,
List,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
TYPE_CHECKING,
)
import warnings
import pandas as pd
from pandas.api.types import is_hashable, is_list_like
if LooseVersion(pd.__version__) >= LooseVersion("1.3.0"):
from pandas.core.common import _builtin_table
else:
from pandas.core.base import SelectionMixin
_builtin_table = SelectionMixin._builtin_table
from pyspark.sql import Column, DataFrame as SparkDataFrame, Window, functions as F
from pyspark.sql.types import (
NumericType,
StructField,
StructType,
StringType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, FrameLike, Label, Name
from pyspark.pandas.typedef import infer_return_type, DataFrameType, ScalarType, SeriesType
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
HIDDEN_COLUMNS,
NATURAL_ORDER_COLUMN_NAME,
SPARK_INDEX_NAME_FORMAT,
SPARK_DEFAULT_SERIES_NAME,
SPARK_INDEX_NAME_PATTERN,
)
from pyspark.pandas.missing.groupby import (
MissingPandasLikeDataFrameGroupBy,
MissingPandasLikeSeriesGroupBy,
)
from pyspark.pandas.series import Series, first_series
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.config import get_option
from pyspark.pandas.utils import (
align_diff_frames,
is_name_like_tuple,
is_name_like_value,
name_like_string,
same_anchor,
scol_for,
verify_temp_column_name,
log_advice,
)
from pyspark.pandas.spark.utils import as_nullable_spark_type, force_decimal_precision_scale
from pyspark.pandas.exceptions import DataError
if TYPE_CHECKING:
from pyspark.pandas.window import RollingGroupby, ExpandingGroupby
# to keep it the same as pandas
NamedAgg = namedtuple("NamedAgg", ["column", "aggfunc"])
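# Example (mirrors pandas; the same call appears in the `aggregate` docstring
# below): NamedAgg names both the source column and the aggregation, e.g.
#   df.groupby("A").agg(b_max=ps.NamedAgg(column="B", aggfunc="max"))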
class GroupBy(Generic[FrameLike], metaclass=ABCMeta):
"""
:ivar _psdf: The parent dataframe that is used to perform the groupby
:type _psdf: DataFrame
:ivar _groupkeys: The list of keys that will be used to perform the grouping
:type _groupkeys: List[Series]
"""
def __init__(
self,
psdf: DataFrame,
groupkeys: List[Series],
as_index: bool,
dropna: bool,
column_labels_to_exclude: Set[Label],
agg_columns_selected: bool,
agg_columns: List[Series],
):
self._psdf = psdf
self._groupkeys = groupkeys
self._as_index = as_index
self._dropna = dropna
self._column_labels_to_exclude = column_labels_to_exclude
self._agg_columns_selected = agg_columns_selected
self._agg_columns = agg_columns
@property
def _groupkeys_scols(self) -> List[Column]:
return [s.spark.column for s in self._groupkeys]
@property
def _agg_columns_scols(self) -> List[Column]:
return [s.spark.column for s in self._agg_columns]
@abstractmethod
def _apply_series_op(
self,
op: Callable[["SeriesGroupBy"], Series],
should_resolve: bool = False,
numeric_only: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _cleanup_and_return(self, psdf: DataFrame) -> FrameLike:
pass
# TODO: Series support is not implemented yet.
# TODO: not all arguments are implemented comparing to pandas' for now.
def aggregate(
self,
func_or_funcs: Optional[Union[str, List[str], Dict[Name, Union[str, List[str]]]]] = None,
*args: Any,
**kwargs: Any,
) -> DataFrame:
"""Aggregate using one or more operations over the specified axis.
Parameters
----------
func_or_funcs : dict, str or list
a dict mapping from column name (string) to
aggregate functions (string or list of strings).
Returns
-------
Series or DataFrame
The return can be:
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
Return Series or DataFrame.
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 1, 2, 2],
... 'B': [1, 2, 3, 4],
... 'C': [0.362, 0.227, 1.267, -0.562]},
... columns=['A', 'B', 'C'])
>>> df
A B C
0 1 1 0.362
1 1 2 0.227
2 2 3 1.267
3 2 4 -0.562
Different aggregations per column
>>> aggregated = df.groupby('A').agg({'B': 'min', 'C': 'sum'})
>>> aggregated[['B', 'C']].sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 1 0.589
2 3 0.705
>>> aggregated = df.groupby('A').agg({'B': ['min', 'max']})
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
B
min max
A
1 1 2
2 3 4
>>> aggregated = df.groupby('A').agg('min')
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 1 0.227
2 3 -0.562
>>> aggregated = df.groupby('A').agg(['min', 'max'])
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
min max min max
A
1 1 2 0.227 0.362
2 3 4 -0.562 1.267
To control the output names with different aggregations per column, pandas-on-Spark
also supports 'named aggregation' or nested renaming in .agg. It can also be
used when applying multiple aggregation functions to specific columns.
>>> aggregated = df.groupby('A').agg(b_max=ps.NamedAgg(column='B', aggfunc='max'))
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
b_max
A
1 2
2 4
>>> aggregated = df.groupby('A').agg(b_max=('B', 'max'), b_min=('B', 'min'))
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
b_max b_min
A
1 2 1
2 4 3
>>> aggregated = df.groupby('A').agg(b_max=('B', 'max'), c_min=('C', 'min'))
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
b_max c_min
A
1 2 0.227
2 4 -0.562
"""
# I think current implementation of func and arguments in pandas-on-Spark for aggregate
# is different than pandas, later once arguments are added, this could be removed.
if func_or_funcs is None and kwargs is None:
raise ValueError("No aggregation argument or function specified.")
relabeling = func_or_funcs is None and is_multi_agg_with_relabel(**kwargs)
if relabeling:
(
func_or_funcs,
columns,
order,
) = normalize_keyword_aggregation( # type: ignore[assignment]
kwargs
)
if not isinstance(func_or_funcs, (str, list)):
if not isinstance(func_or_funcs, dict) or not all(
is_name_like_value(key)
and (
isinstance(value, str)
or isinstance(value, list)
and all(isinstance(v, str) for v in value)
)
for key, value in func_or_funcs.items()
):
raise ValueError(
"aggs must be a dict mapping from column name "
"to aggregate functions (string or list of strings)."
)
else:
agg_cols = [col.name for col in self._agg_columns]
func_or_funcs = OrderedDict([(col, func_or_funcs) for col in agg_cols])
psdf: DataFrame = DataFrame(
GroupBy._spark_groupby(self._psdf, func_or_funcs, self._groupkeys)
)
if self._dropna:
psdf = DataFrame(
psdf._internal.with_new_sdf(
psdf._internal.spark_frame.dropna(
subset=psdf._internal.index_spark_column_names
)
)
)
if not self._as_index:
should_drop_index = set(
i for i, gkey in enumerate(self._groupkeys) if gkey._psdf is not self._psdf
)
if len(should_drop_index) > 0:
psdf = psdf.reset_index(level=should_drop_index, drop=True)
if len(should_drop_index) < len(self._groupkeys):
psdf = psdf.reset_index()
if relabeling:
psdf = psdf[order]
psdf.columns = columns
return psdf
agg = aggregate
@staticmethod
def _spark_groupby(
psdf: DataFrame,
func: Mapping[Name, Union[str, List[str]]],
groupkeys: Sequence[Series] = (),
) -> InternalFrame:
groupkey_names = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(groupkeys))]
groupkey_scols = [s.spark.column.alias(name) for s, name in zip(groupkeys, groupkey_names)]
multi_aggs = any(isinstance(v, list) for v in func.values())
reordered = []
data_columns = []
column_labels = []
for key, value in func.items():
label = key if is_name_like_tuple(key) else (key,)
if len(label) != psdf._internal.column_labels_level:
raise TypeError("The length of the key must be the same as the column label level.")
for aggfunc in [value] if isinstance(value, str) else value:
column_label = tuple(list(label) + [aggfunc]) if multi_aggs else label
column_labels.append(column_label)
data_col = name_like_string(column_label)
data_columns.append(data_col)
col_name = psdf._internal.spark_column_name_for(label)
if aggfunc == "nunique":
reordered.append(
F.expr("count(DISTINCT `{0}`) as `{1}`".format(col_name, data_col))
)
# Implement "quartiles" aggregate function for ``describe``.
elif aggfunc == "quartiles":
reordered.append(
F.expr(
"percentile_approx(`{0}`, array(0.25, 0.5, 0.75)) as `{1}`".format(
col_name, data_col
)
)
)
else:
reordered.append(
F.expr("{1}(`{0}`) as `{2}`".format(col_name, aggfunc, data_col))
)
sdf = psdf._internal.spark_frame.select(groupkey_scols + psdf._internal.data_spark_columns)
sdf = sdf.groupby(*groupkey_names).agg(*reordered)
return InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in groupkey_names],
index_names=[psser._column_label for psser in groupkeys],
index_fields=[
psser._internal.data_fields[0].copy(name=name)
for psser, name in zip(groupkeys, groupkey_names)
],
column_labels=column_labels,
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
)
def count(self) -> FrameLike:
"""
Compute count of group, excluding missing values.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
>>> df.groupby('A').count().sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 2 3
2 2 2
"""
return self._reduce_for_stat_function(F.count, only_numeric=False)
# TODO: We should fix See Also when Series implementation is finished.
def first(self) -> FrameLike:
"""
Compute first of group values.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.first, only_numeric=False)
def last(self) -> FrameLike:
"""
Compute last of group values.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
"""
return self._reduce_for_stat_function(
lambda col: F.last(col, ignorenulls=True), only_numeric=False
)
def max(self) -> FrameLike:
"""
Compute max of group values.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.max, only_numeric=False)
# TODO: examples should be updated.
def mean(self) -> FrameLike:
"""
Compute mean of groups, excluding missing values.
Returns
-------
pyspark.pandas.Series or pyspark.pandas.DataFrame
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
Groupby one column and return the mean of the remaining columns in
each group.
>>> df.groupby('A').mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 3.0 1.333333
2 4.0 1.500000
"""
return self._reduce_for_stat_function(F.mean, only_numeric=True)
def min(self) -> FrameLike:
"""
Compute min of group values.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.min, only_numeric=False)
# TODO: sync the doc.
def std(self, ddof: int = 1) -> FrameLike:
"""
Compute standard deviation of groups, excluding missing values.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
"""
assert ddof in (0, 1)
return self._reduce_for_stat_function(
F.stddev_pop if ddof == 0 else F.stddev_samp, only_numeric=True
)
def sum(self) -> FrameLike:
"""
Compute sum of group values
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.sum, only_numeric=True)
# TODO: sync the doc.
def var(self, ddof: int = 1) -> FrameLike:
"""
Compute variance of groups, excluding missing values.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
"""
assert ddof in (0, 1)
return self._reduce_for_stat_function(
F.var_pop if ddof == 0 else F.var_samp, only_numeric=True
)
# TODO: skipna should be implemented.
def all(self) -> FrameLike:
"""
Returns True if all values in the group are truthful, else False.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],
... 'B': [True, True, True, False, False,
... False, None, True, None, False]},
... columns=['A', 'B'])
>>> df
A B
0 1 True
1 1 True
2 2 True
3 2 False
4 3 False
5 3 False
6 4 None
7 4 True
8 5 None
9 5 False
>>> df.groupby('A').all().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
1 True
2 False
3 False
4 True
5 False
"""
return self._reduce_for_stat_function(
lambda col: F.min(F.coalesce(col.cast("boolean"), SF.lit(True))), only_numeric=False
)
# TODO: skipna should be implemented.
def any(self) -> FrameLike:
"""
Returns True if any value in the group is truthful, else False.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],
... 'B': [True, True, True, False, False,
... False, None, True, None, False]},
... columns=['A', 'B'])
>>> df
A B
0 1 True
1 1 True
2 2 True
3 2 False
4 3 False
5 3 False
6 4 None
7 4 True
8 5 None
9 5 False
>>> df.groupby('A').any().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
1 True
2 True
3 False
4 True
5 False
"""
return self._reduce_for_stat_function(
lambda col: F.max(F.coalesce(col.cast("boolean"), SF.lit(False))), only_numeric=False
)
    # TODO: groupby on multiple columns should be implemented.
def size(self) -> Series:
"""
Compute group sizes.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 2, 2, 3, 3, 3],
... 'B': [1, 1, 2, 3, 3, 3]},
... columns=['A', 'B'])
>>> df
A B
0 1 1
1 2 1
2 2 2
3 3 3
4 3 3
5 3 3
>>> df.groupby('A').size().sort_index()
A
1 1
2 2
3 3
dtype: int64
>>> df.groupby(['A', 'B']).size().sort_index()
A B
1 1 1
2 1 1
2 1
3 3 3
dtype: int64
For Series,
>>> df.B.groupby(df.A).size().sort_index()
A
1 1
2 2
3 3
Name: B, dtype: int64
>>> df.groupby(df.A).B.size().sort_index()
A
1 1
2 2
3 3
Name: B, dtype: int64
"""
groupkeys = self._groupkeys
groupkey_names = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(groupkeys))]
groupkey_scols = [s.spark.column.alias(name) for s, name in zip(groupkeys, groupkey_names)]
sdf = self._psdf._internal.spark_frame.select(
groupkey_scols + self._psdf._internal.data_spark_columns
)
sdf = sdf.groupby(*groupkey_names).count()
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in groupkey_names],
index_names=[psser._column_label for psser in groupkeys],
index_fields=[
psser._internal.data_fields[0].copy(name=name)
for psser, name in zip(groupkeys, groupkey_names)
],
column_labels=[None],
data_spark_columns=[scol_for(sdf, "count")],
)
return first_series(DataFrame(internal))
def diff(self, periods: int = 1) -> FrameLike:
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another element in the
DataFrame group (default is the element in the same column of the previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative values.
Returns
-------
diffed : DataFrame or Series
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.groupby(['b']).diff().sort_index()
a c
0 NaN NaN
1 1.0 3.0
2 NaN NaN
3 NaN NaN
4 NaN NaN
5 NaN NaN
Difference with previous column in a group.
>>> df.groupby(['b'])['a'].diff().sort_index()
0 NaN
1 1.0
2 NaN
3 NaN
4 NaN
5 NaN
Name: a, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._psser._diff(periods, part_cols=sg._groupkeys_scols), should_resolve=True
)
def cumcount(self, ascending: bool = True) -> Series:
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
.. code-block:: python
self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Returns
-------
Series
Sequence number of each element within each group.
Examples
--------
>>> df = ps.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount().sort_index()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False).sort_index()
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
ret = (
self._groupkeys[0]
.rename()
.spark.transform(lambda _: SF.lit(0))
._cum(F.count, True, part_cols=self._groupkeys_scols, ascending=ascending)
- 1
)
internal = ret._internal.resolved_copy
return first_series(DataFrame(internal))
def cummax(self) -> FrameLike:
"""
Cumulative max for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cummax
DataFrame.cummax
Examples
--------
>>> df = ps.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
By default, iterates over rows and finds the maximum in each column.
>>> df.groupby("A").cummax().sort_index()
B C
0 NaN 4
1 0.1 4
2 20.0 4
3 10.0 1
It works as below in Series.
>>> df.C.groupby(df.A).cummax().sort_index()
0 4
1 4
2 4
3 1
Name: C, dtype: int64
"""
return self._apply_series_op(
lambda sg: sg._psser._cum(F.max, True, part_cols=sg._groupkeys_scols),
should_resolve=True,
numeric_only=True,
)
def cummin(self) -> FrameLike:
"""
Cumulative min for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cummin
DataFrame.cummin
Examples
--------
>>> df = ps.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
By default, iterates over rows and finds the minimum in each column.
>>> df.groupby("A").cummin().sort_index()
B C
0 NaN 4
1 0.1 3
2 0.1 2
3 10.0 1
It works as below in Series.
>>> df.B.groupby(df.A).cummin().sort_index()
0 NaN
1 0.1
2 0.1
3 10.0
Name: B, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._psser._cum(F.min, True, part_cols=sg._groupkeys_scols),
should_resolve=True,
numeric_only=True,
)
def cumprod(self) -> FrameLike:
"""
Cumulative product for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cumprod
DataFrame.cumprod
Examples
--------
>>> df = ps.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
By default, iterates over rows and finds the product in each column.
>>> df.groupby("A").cumprod().sort_index()
B C
0 NaN 4
1 0.1 12
2 2.0 24
3 10.0 1
It works as below in Series.
>>> df.B.groupby(df.A).cumprod().sort_index()
0 NaN
1 0.1
2 2.0
3 10.0
Name: B, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._psser._cumprod(True, part_cols=sg._groupkeys_scols),
should_resolve=True,
numeric_only=True,
)
def cumsum(self) -> FrameLike:
"""
Cumulative sum for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cumsum
DataFrame.cumsum
Examples
--------
>>> df = ps.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
By default, iterates over rows and finds the sum in each column.
>>> df.groupby("A").cumsum().sort_index()
B C
0 NaN 4
1 0.1 7
2 20.1 9
3 10.0 1
It works as below in Series.
>>> df.B.groupby(df.A).cumsum().sort_index()
0 NaN
1 0.1
2 20.1
3 10.0
Name: B, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._psser._cumsum(True, part_cols=sg._groupkeys_scols),
should_resolve=True,
numeric_only=True,
)
def apply(self, func: Callable, *args: Any, **kwargs: Any) -> Union[DataFrame, Series]:
"""
Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a DataFrame as its first
argument and return a DataFrame. `apply` will
then take care of combining the results back together into a single
dataframe. `apply` is therefore a highly flexible
grouping method.
While `apply` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
like `agg` or `transform`. pandas-on-Spark offers a wide range of methods that will
be much faster than using `apply` for their specific purposes, so try to
use them before reaching for `apply`.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def pandas_div(x) -> ps.DataFrame[int, [float, float]]:
... return x[['B', 'C']] / x[['B', 'C']]
If the return type is specified, the output column names become
`c0, c1, c2 ... cn`. These names are positionally mapped to the returned
DataFrame in ``func``.
To specify the column names, you can assign them in a NumPy compound type style
as below:
>>> def pandas_div(x) -> ps.DataFrame[("index", int), [("a", float), ("b", float)]]:
... return x[['B', 'C']] / x[['B', 'C']]
>>> pdf = pd.DataFrame({'B': [1.], 'C': [3.]})
>>> def plus_one(x) -> ps.DataFrame[
... (pdf.index.name, pdf.index.dtype), zip(pdf.columns, pdf.dtypes)]:
... return x[['B', 'C']] / x[['B', 'C']]
.. note:: the dataframe within ``func`` is actually a pandas dataframe. Therefore,
any pandas API within this function is allowed.
Parameters
----------
func : callable
A callable that takes a DataFrame as its first argument, and
returns a dataframe.
*args
Positional arguments to pass to func.
**kwargs
Keyword arguments to pass to func.
Returns
-------
applied : DataFrame or Series
See Also
--------
aggregate : Apply aggregate function to the GroupBy object.
DataFrame.apply : Apply a function to a DataFrame.
Series.apply : Apply a function to a Series.
Examples
--------
>>> df = ps.DataFrame({'A': 'a a b'.split(),
... 'B': [1, 2, 3],
... 'C': [4, 6, 5]}, columns=['A', 'B', 'C'])
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Below, the function passed to `apply` takes a DataFrame as
its argument and returns a DataFrame. `apply` combines the result for
each group together into a new DataFrame:
>>> def plus_min(x):
... return x + x.min()
>>> g.apply(plus_min).sort_index() # doctest: +NORMALIZE_WHITESPACE
A B C
0 aa 2 8
1 aa 3 10
2 bb 6 10
>>> g.apply(sum).sort_index() # doctest: +NORMALIZE_WHITESPACE
A B C
A
a aa 3 10
b b 3 5
>>> g.apply(len).sort_index() # doctest: +NORMALIZE_WHITESPACE
A
a 2
b 1
dtype: int64
You can specify the type hint and prevent schema inference for better performance.
>>> def pandas_div(x) -> ps.DataFrame[int, [float, float]]:
... return x[['B', 'C']] / x[['B', 'C']]
>>> g.apply(pandas_div).sort_index() # doctest: +NORMALIZE_WHITESPACE
c0 c1
0 1.0 1.0
1 1.0 1.0
2 1.0 1.0
>>> def pandas_div(x) -> ps.DataFrame[("index", int), [("f1", float), ("f2", float)]]:
... return x[['B', 'C']] / x[['B', 'C']]
>>> g.apply(pandas_div).sort_index() # doctest: +NORMALIZE_WHITESPACE
f1 f2
index
0 1.0 1.0
1 1.0 1.0
2 1.0 1.0
In case of Series, it works as below.
>>> def plus_max(x) -> ps.Series[np.int]:
... return x + x.max()
>>> df.B.groupby(df.A).apply(plus_max).sort_index() # doctest: +SKIP
0 6
1 3
2 4
Name: B, dtype: int64
>>> def plus_min(x):
... return x + x.min()
>>> df.B.groupby(df.A).apply(plus_min).sort_index()
0 2
1 3
2 6
Name: B, dtype: int64
You can also return a scalar value as an aggregated value of the group:
>>> def plus_length(x) -> np.int:
... return len(x)
>>> df.B.groupby(df.A).apply(plus_length).sort_index() # doctest: +SKIP
0 1
1 2
Name: B, dtype: int64
The extra arguments to the function can be passed as below.
>>> def calculation(x, y, z) -> np.int:
... return len(x) + y * z
>>> df.B.groupby(df.A).apply(calculation, 5, z=10).sort_index() # doctest: +SKIP
0 51
1 52
Name: B, dtype: int64
"""
if not callable(func):
raise TypeError("%s object is not callable" % type(func).__name__)
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
should_infer_schema = return_sig is None
should_retain_index = should_infer_schema
is_series_groupby = isinstance(self, SeriesGroupBy)
psdf = self._psdf
if self._agg_columns_selected:
agg_columns = self._agg_columns
else:
agg_columns = [
psdf._psser_for(label)
for label in psdf._internal.column_labels
if label not in self._column_labels_to_exclude
]
psdf, groupkey_labels, groupkey_names = GroupBy._prepare_group_map_apply(
psdf, self._groupkeys, agg_columns
)
if is_series_groupby:
name = psdf.columns[-1]
pandas_apply = _builtin_table.get(func, func)
else:
f = _builtin_table.get(func, func)
def pandas_apply(pdf: pd.DataFrame, *a: Any, **k: Any) -> Any:
return f(pdf.drop(groupkey_names, axis=1), *a, **k)
should_return_series = False
if should_infer_schema:
# Here we execute the function on the first `compute.shortcut_limit` rows to infer the return type.
log_advice(
"If the type hints is not specified for `grouby.apply`, "
"it is expensive to infer the data type internally."
)
limit = get_option("compute.shortcut_limit")
pdf = psdf.head(limit + 1)._to_internal_pandas()
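# We sample limit + 1 rows: if the sample still holds at most `limit` rows, the
# whole dataset was covered and the locally computed result can be returned as-is
# (see the len(pdf) <= limit check below); otherwise the sample is only used to
# infer the return schema.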
groupkeys = [
pdf[groupkey_name].rename(psser.name)
for groupkey_name, psser in zip(groupkey_names, self._groupkeys)
]
grouped = pdf.groupby(groupkeys)
if is_series_groupby:
pser_or_pdf = grouped[name].apply(pandas_apply, *args, **kwargs)
else:
pser_or_pdf = grouped.apply(pandas_apply, *args, **kwargs)
psser_or_psdf = ps.from_pandas(pser_or_pdf)
if len(pdf) <= limit:
if isinstance(psser_or_psdf, ps.Series) and is_series_groupby:
psser_or_psdf = psser_or_psdf.rename(cast(SeriesGroupBy, self)._psser.name)
return cast(Union[Series, DataFrame], psser_or_psdf)
if len(grouped) <= 1:
with warnings.catch_warnings():
warnings.simplefilter("always")
warnings.warn(
"The amount of data for return type inference might not be large enough. "
"Consider increasing an option `compute.shortcut_limit`."
)
if isinstance(psser_or_psdf, Series):
should_return_series = True
psdf_from_pandas = psser_or_psdf._psdf
else:
psdf_from_pandas = cast(DataFrame, psser_or_psdf)
index_fields = [
field.normalize_spark_type() for field in psdf_from_pandas._internal.index_fields
]
data_fields = [
field.normalize_spark_type() for field in psdf_from_pandas._internal.data_fields
]
return_schema = StructType([field.struct_field for field in index_fields + data_fields])
else:
return_type = infer_return_type(func)
if not is_series_groupby and isinstance(return_type, SeriesType):
raise TypeError(
"Series as a return type hint at frame groupby is not supported "
"currently; however got [%s]. Use DataFrame type hint instead." % return_sig
)
if isinstance(return_type, DataFrameType):
data_fields = cast(DataFrameType, return_type).data_fields
return_schema = cast(DataFrameType, return_type).spark_type
index_fields = cast(DataFrameType, return_type).index_fields
should_retain_index = len(index_fields) > 0
psdf_from_pandas = None
else:
should_return_series = True
dtype = cast(Union[SeriesType, ScalarType], return_type).dtype
spark_type = cast(Union[SeriesType, ScalarType], return_type).spark_type
if is_series_groupby:
data_fields = [
InternalField(
dtype=dtype, struct_field=StructField(name=name, dataType=spark_type)
)
]
else:
data_fields = [
InternalField(
dtype=dtype,
struct_field=StructField(
name=SPARK_DEFAULT_SERIES_NAME, dataType=spark_type
),
)
]
return_schema = StructType([field.struct_field for field in data_fields])
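# The wrapper below runs on the executors once per group: it receives the group's
# rows as a pandas DataFrame, replays the pandas groupby-apply locally, and
# normalizes the result to a pandas DataFrame so it conforms to return_schema.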
def pandas_groupby_apply(pdf: pd.DataFrame) -> pd.DataFrame:
if is_series_groupby:
pdf_or_ser = pdf.groupby(groupkey_names)[name].apply(pandas_apply, *args, **kwargs)
else:
pdf_or_ser = pdf.groupby(groupkey_names).apply(pandas_apply, *args, **kwargs)
if should_return_series and isinstance(pdf_or_ser, pd.DataFrame):
pdf_or_ser = pdf_or_ser.stack()
if not isinstance(pdf_or_ser, pd.DataFrame):
return pd.DataFrame(pdf_or_ser)
else:
return pdf_or_ser
sdf = GroupBy._spark_group_map_apply(
psdf,
pandas_groupby_apply,
[psdf._internal.spark_column_for(label) for label in groupkey_labels],
return_schema,
retain_index=should_retain_index,
)
if should_retain_index:
# If schema is inferred, we can restore indexes too.
if psdf_from_pandas is not None:
internal = psdf_from_pandas._internal.with_new_sdf(
spark_frame=sdf, index_fields=index_fields, data_fields=data_fields
)
else:
index_names: Optional[List[Optional[Tuple[Any, ...]]]] = None
index_spark_columns = [
scol_for(sdf, index_field.struct_field.name) for index_field in index_fields
]
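# If none of the restored index columns follow the internal index naming pattern
# (SPARK_INDEX_NAME_PATTERN), treat their field names as user-specified index names.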
if not any(
[
SPARK_INDEX_NAME_PATTERN.match(index_field.struct_field.name)
for index_field in index_fields
]
):
index_names = [(index_field.struct_field.name,) for index_field in index_fields]
internal = InternalFrame(
spark_frame=sdf,
index_names=index_names,
index_spark_columns=index_spark_columns,
index_fields=index_fields,
data_fields=data_fields,
)
else:
# Otherwise, it loses index.
internal = InternalFrame(
spark_frame=sdf, index_spark_columns=None, data_fields=data_fields
)
if should_return_series:
psser = first_series(DataFrame(internal))
if is_series_groupby:
psser = psser.rename(cast(SeriesGroupBy, self)._psser.name)
return psser
else:
return DataFrame(internal)
# TODO: implement 'dropna' parameter
def filter(self, func: Callable[[FrameLike], FrameLike]) -> FrameLike:
"""
Return a copy of a DataFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Returns
-------
filtered : DataFrame or Series
Notes
-----
Each subframe is endowed with the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> df = ps.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]}, columns=['A', 'B', 'C'])
>>> grouped = df.groupby('A')
>>> grouped.filter(lambda x: x['B'].mean() > 3.)
A B C
1 bar 2 5.0
3 bar 4 1.0
5 bar 6 9.0
>>> df.B.groupby(df.A).filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
"""
if not callable(func):
raise TypeError("%s object is not callable" % type(func).__name__)
is_series_groupby = isinstance(self, SeriesGroupBy)
psdf = self._psdf
if self._agg_columns_selected:
agg_columns = self._agg_columns
else:
agg_columns = [
psdf._psser_for(label)
for label in psdf._internal.column_labels
if label not in self._column_labels_to_exclude
]
data_schema = (
psdf[agg_columns]._internal.resolved_copy.spark_frame.drop(*HIDDEN_COLUMNS).schema
)
psdf, groupkey_labels, groupkey_names = GroupBy._prepare_group_map_apply(
psdf, self._groupkeys, agg_columns
)
if is_series_groupby:
def pandas_filter(pdf: pd.DataFrame) -> pd.DataFrame:
return pd.DataFrame(pdf.groupby(groupkey_names)[pdf.columns[-1]].filter(func))
else:
f = _builtin_table.get(func, func)
import copy
import itertools
import re
import operator
from datetime import datetime, timedelta
from collections import defaultdict
import numpy as np
from pandas.core.base import PandasObject
from pandas.core.common import (_possibly_downcast_to_dtype, isnull,
_NS_DTYPE, _TD_DTYPE, ABCSeries, is_list_like,
ABCSparseSeries, _infer_dtype_from_scalar,
is_null_datelike_scalar, _maybe_promote,
is_timedelta64_dtype, is_datetime64_dtype,
array_equivalent, _maybe_convert_string_to_object,
is_categorical, needs_i8_conversion, is_datetimelike_v_numeric)
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
from pandas.core.categorical import Categorical, maybe_to_categorical
import pandas.core.common as com
from pandas.sparse.array import _maybe_to_sparse, SparseArray
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.computation.expressions as expressions
from pandas.util.decorators import cache_readonly
from pandas.tslib import Timestamp, Timedelta
from pandas import compat
from pandas.compat import range, map, zip, u
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
from pandas.lib import BlockPlacement
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if len(self.mgr_locs) != len(self.values):
raise ValueError('Wrong number of items passed %d,'
' placement implies %d' % (
len(self.values), len(self.mgr_locs)))
@property
def _consolidate_key(self):
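# blocks sharing the same key (consolidatable flag, dtype name) are candidates
# for being merged into a single block by the block manager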
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
validate that we have an astype-able target dtype for categorical;
returns True if the target dtype is categorical
"""
if com.is_categorical_dtype(dtype):
if dtype == com.CategoricalDtype():
return True
# this is a pd.Categorical, but is not
# a valid type for astypeing
raise TypeError("invalid type {0} for astype".format(dtype))
return False
def to_dense(self):
return self.values.view()
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return self.dtype
def make_block_same_class(self, values, placement, copy=False, fastpath=True,
**kwargs):
"""
Wrap given values in a block of same type as self.
`kwargs` are used in SparseBlock override.
"""
if copy:
values = values.copy()
return make_block(values, placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out all of the items here
name = com.pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (
name, len(self), self.dtype)
else:
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (
name, com.pprint_thing(self.mgr_locs.indexer), shape,
self.dtype)
return result
def __len__(self):
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(
values=self.get_values().T,
placement=self.mgr_locs,
shape=shape,
labels=labels,
ref_items=ref_items)
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def merge(self, other):
return _merge_blocks([self, other])
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def iget(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, **kwargs):
""" apply the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not isinstance(result, Block):
result = make_block(values=_block_shape(result), placement=self.mgr_locs,)
return result
def fillna(self, value, limit=None, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
mask = isnull(self.values)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
mask[mask.cumsum(self.ndim-1) > limit] = False
value = self._try_fill(value)
blocks = self.putmask(mask, value, inplace=inplace)
return self._maybe_downcast(blocks, downcast)
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
result_blocks = []
for b in blocks:
result_blocks.extend(b.downcast(downcast))
return result_blocks
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return [self]
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
return [make_block(nv, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
# ndim > 1
if dtypes is None:
return [self]
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# item-by-item
# this is expensive as it splits the blocks items-by-item
blocks = []
for i, rl in enumerate(self.mgr_locs):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
dtype = dtypes.get(item, self._downcast_dtype)
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.append(make_block(nv,
ndim=self.ndim, fastpath=True,
placement=[rl]))
return blocks
def astype(self, dtype, copy=False, raise_on_error=True, values=None, **kwargs):
return self._astype(dtype, copy=copy, raise_on_error=raise_on_error,
values=values, **kwargs)
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None, **kwargs):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
# may need to convert to categorical
# this is only called for non-categoricals
if self.is_categorical_astype(dtype):
return make_block(Categorical(self.values, **kwargs),
ndim=self.ndim,
placement=self.mgr_locs)
# astype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
# _astype_nansafe works fine with 1-d only
values = com._astype_nansafe(self.values.ravel(), dtype, copy=True)
values = values.reshape(self.values.shape)
newb = make_block(values,
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True, dtype=dtype, klass=klass)
except:
if raise_on_error is True:
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError("cannot set astype for copy = [%s] for dtype "
"(%s [%s]) with smaller itemsize that current "
"(%s [%s])" % (copy, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
def convert(self, copy=True, **kwargs):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we are not an ObjectBlock here! """
return [self.copy()] if copy else [self]
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type,
we may have roundtripped through object in the meantime """
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if isinstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not isinstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if isnull(result).all():
return result.astype(np.bool_)
else:
result = result.astype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.astype(np.object_)
return result
# may need to change the dtype here
return _possibly_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
return values
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def _try_fill(self, value):
return value
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
if not self.is_object and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
return make_block(values, ndim=self.ndim,
klass=self.__class__, fastpath=True,
placement=self.mgr_locs)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
""" replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
mask = com.mask_missing(self.values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.any():
if inplace:
return [self]
return [self.copy()]
return self.putmask(mask, value, inplace=inplace)
def setitem(self, indexer, value):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce args
values, value = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = com._maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = len(values)
# length checking
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (isinstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
len(indexer[indexer]) == len(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different length than the value")
# slice
elif isinstance(indexer, slice):
if is_list_like(value) and l:
if len(value) != length_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
try:
def _is_scalar_indexer(indexer):
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return all([ np.isscalar(idx) for idx in indexer ])
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif len(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape):
values[indexer] = value
values = values.astype(arr_value.dtype)
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
if np.isscalar(value):
dtype, _ = _infer_dtype_from_scalar(value)
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
block = make_block(transf(values),
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True)
# may have to soft convert_objects here
if block.is_object and not self.is_object:
block = block.convert(numeric=False)
return block
except (ValueError, TypeError) as detail:
raise
except Exception as detail:
pass
return [self]
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
# may need to align the new
if hasattr(new, 'reindex_axis'):
new = new.values.T
# may need to align the mask
if hasattr(mask, 'reindex_axis'):
mask = mask.values.T
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isnull(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
new = self._try_cast(new)
# pseudo-broadcast
if isinstance(new, np.ndarray) and new.ndim == self.ndim - 1:
new = np.repeat(new, self.shape[-1]).reshape(self.shape)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
# need to go column by column
new_blocks = []
if self.ndim > 1:
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
n = new[i] if isinstance(
new, np.ndarray) else np.array(new)
# type of the new block
dtype, _ = com._maybe_promote(n.dtype)
# we need to explicitly astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(v, m, n)
else:
nv = v if inplace else v.copy()
# Put back the dimension that was taken from it and make
# a block out of the result.
block = make_block(values=nv[np.newaxis],
placement=[ref_loc],
fastpath=True)
new_blocks.append(block)
else:
nv = _putmask_smart(new_values, mask, new)
new_blocks.append(make_block(values=nv,
placement=self.mgr_locs,
fastpath=True))
return new_blocks
if inplace:
return [self]
return [make_block(new_values,
placement=self.mgr_locs, fastpath=True)]
def interpolate(self, method='pad', axis=0, index=None,
values=None, inplace=False, limit=None,
fill_value=None, coerce=False, downcast=None, **kwargs):
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = com._clean_fill_method(method)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m,
axis=axis,
inplace=inplace,
limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast)
# try an interp method
try:
m = com._clean_interp_method(method, **kwargs)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m,
index=index,
values=values,
axis=axis,
limit=limit,
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
**kwargs)
raise ValueError("invalid method '{0}' to interpolate.".format(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None):
""" fillna but using the interpolate machinery """
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
fill_value = self._try_fill(fill_value)
values = self.values if inplace else self.values.copy()
values = self._try_operate(values)
values = com.interpolate_2d(values,
method=method,
axis=axis,
limit=limit,
fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [make_block(values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
inplace=False, downcast=None, **kwargs):
""" interpolate using scipy wrappers """
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".format(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to com.interpolate_1d
return com.interpolate_1d(index, x, method=method, limit=limit,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [make_block(interp_values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = self.fill_value
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=False)
else:
fill_value = fill_tuple[0]
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = lib.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if new_values.dtype != self.dtype:
return make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def get_values(self, dtype=None):
return self.values
def diff(self, n, axis=1):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=axis)
return [make_block(values=new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def shift(self, periods, axis=0):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
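# (after the transpose above, axis indices are mirrored, so the shift axis is
# remapped to keep rolling along the originally requested axis)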
if np.prod(new_values.shape):
new_values = np.roll(new_values, com._ensure_platform_int(periods), axis=axis)
axis_indexer = [ slice(None) ] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None,periods)
else:
axis_indexer[axis] = slice(periods,None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def eval(self, func, other, raise_on_error=True, try_cast=False):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
raise_on_error : if True (the default), raise when I can't perform the
function; otherwise just return the data that we had coming in
Returns
-------
a new block, the result of the func
"""
values = self.values
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim:
is_transposed = True
else:
if values.shape == other.shape[::-1]:
is_transposed = True
elif values.shape[0] == other.shape[-1]:
is_transposed = True
else:
# this is a broadcast error here
raise ValueError("cannot broadcast shape [%s] with block "
"values [%s]" % (values.T.shape,
other.shape))
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
values, other = self._try_coerce_args(transf(values), other)
# get the result, may need to transpose the other
def get_result(other):
return self._try_coerce_result(func(values, other))
# error handler if we have an issue operating with the function
def handle_error():
if raise_on_error:
raise TypeError('Could not operate %s with block values %s'
% (repr(other), str(detail)))
else:
# return the values
result = np.empty(values.shape, dtype='O')
result.fill(np.nan)
return result
# get the result
try:
result = get_result(other)
# if we have an invalid shape/broadcast error
# GH4576, so raise instead of allowing to pass through
except ValueError as detail:
raise
except Exception as detail:
result = handle_error()
# technically a broadcast error in numpy can 'work' by returning a
# boolean False
if not isinstance(result, np.ndarray):
# differentiate between an invalid ndarray-ndarray comparison
# and an invalid type comparison
if isinstance(values, np.ndarray) and is_list_like(other):
raise ValueError('Invalid broadcasting comparison [%s] '
'with block values' % repr(other))
raise TypeError('Could not compare [%s] with block values'
% repr(other))
# transpose if needed
result = transf(result)
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return [make_block(result, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
raise_on_error : if True (the default), raise when I can't perform the
function; otherwise just return the data that we had coming in
Returns
-------
a new block(s), the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
# if its symmetric are ok, no reshaping needed (GH 7506)
if (values.shape[0] == np.array(values.shape)).all():
pass
# pseudo broadcast (it's a 2d vs 1d, say, and where needs it in a
# specific direction)
elif (other.ndim >= 1 and values.ndim - 1 == other.ndim and
values.shape[0] != other.shape[0]):
other = _block_shape(other).T
else:
values = values.T
is_transposed = True
# see if we can align cond
if not hasattr(cond, 'shape'):
raise ValueError(
"where must have a condition that is ndarray like")
if hasattr(cond, 'reindex_axis'):
cond = cond.values
# may need to undo transpose of values
if hasattr(values, 'ndim'):
if values.ndim != cond.ndim or values.shape == cond.shape[::-1]:
values = values.T
is_transposed = not is_transposed
other = _maybe_convert_string_to_object(other)
# our where function
def func(c, v, o):
if c.ravel().all():
return v
v, o = self._try_coerce_args(v, o)
try:
return self._try_coerce_result(
expressions.where(c, v, o, raise_on_error=True)
)
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
'[%s]' % (repr(o), str(detail)))
else:
# return the values
result = np.empty(v.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
result = func(cond, values, other)
if self._can_hold_na or self.ndim == 1:
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result,
ndim=self.ndim, placement=self.mgr_locs)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
r = self._try_cast_result(
result.take(m.nonzero()[0], axis=axis))
result_blocks.append(make_block(r.T,
placement=self.mgr_locs[m]))
return result_blocks
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
return array_equivalent(self.values, other.values)
class NonConsolidatableMixIn(object):
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
_holder = None
def __init__(self, values, placement,
ndim=None, fastpath=False,):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
# have a length.
self.mgr_locs = placement
# kludgetastic
if ndim is None:
if len(self.mgr_locs) != 1:
ndim = 1
else:
ndim = 2
self.ndim = ndim
if not isinstance(values, self._holder):
raise TypeError("values must be {0}".format(self._holder.__name__))
self.values = values
def get_values(self, dtype=None):
""" need to to_dense myself (and always return a ndim sized object) """
values = self.values.to_dense()
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values
def should_store(self, value):
return isinstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.tolist() == [0]
self.values = values
def get(self, item):
if self.ndim == 1:
loc = self.items.get_loc(item)
return self.values[loc]
else:
return self.values
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.get_values()[slicer]
def _try_cast_result(self, result, dtype=None):
return result
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
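# elementwise comparison that treats NaNs in matching positions as equal
# (a plain == would report NaN != NaN and make identical blocks compare unequal)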
left, right = self.values, other.values
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, (np.floating, np.integer)) and not issubclass(
tipo, (np.datetime64, np.timedelta64))
return isinstance(element, (float, int, np.float_, np.int_)) and not isinstance(
element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64))
def _try_cast(self, element):
try:
return float(element)
except: # pragma: no cover
return element
def to_native_types(self, slicer=None, na_rep='', float_format=None, decimal='.',
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
formatter = None
if float_format and decimal != '.':
formatter = lambda v : (float_format % v).replace('.',decimal,1)
elif decimal != '.':
formatter = lambda v : ('%g' % v).replace('.',decimal,1)
elif float_format:
formatter = lambda v : float_format % v
if formatter is None and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
if formatter:
imask = (~mask).ravel()
values.flat[imask] = np.array(
[formatter(val) for val in values.ravel()[imask]])
return values
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return (issubclass(value.dtype.type, np.floating) and
value.dtype == self.dtype)
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, (np.floating, np.integer, np.complexfloating))
return (isinstance(element, (float, int, complex, np.float_, np.int_)) and
not isinstance(element, (bool, np.bool_)))
def _try_cast(self, element):
try:
return complex(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, np.integer) and not issubclass(tipo, (np.datetime64, np.timedelta64))
return com.is_integer(element)
def _try_cast(self, element):
try:
return int(element)
except: # pragma: no cover
return element
def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
class TimeDeltaBlock(IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
value = tslib.iNaT
elif isinstance(value, Timedelta):
value = value.value
elif isinstance(value, np.timedelta64):
pass
elif com.is_integer(value):
# coerce to seconds of timedelta
value = np.timedelta64(int(value * 1e9))
elif isinstance(value, timedelta):
value = np.timedelta64(value)
return value
def _try_coerce_args(self, values, other):
""" Coerce values and other to float64, with null values converted to
NaN. values is always ndarray-like, other may not be """
def masker(v):
mask = isnull(v)
v = v.astype('float64')
v[mask] = np.nan
return v
values = masker(values)
if is_null_datelike_scalar(other):
other = np.nan
elif isinstance(other, (np.timedelta64, Timedelta, timedelta)):
other = _coerce_scalar_to_timedelta_type(other, unit='s', box=False).item()
if other == tslib.iNaT:
other = np.nan
elif lib.isscalar(other):
other = np.float64(other)
else:
other = masker(other)
return values, other
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if isinstance(result, np.ndarray):
mask = isnull(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('m8[ns]')
result[mask] = tslib.iNaT
elif isinstance(result, np.integer):
result = lib.Timedelta(result)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.timedelta64)
def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).ravel()
#### FIXME ####
# should use the core.format.Timedelta64Formatter here
# to figure what format to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all')
for val in values.ravel()[imask]],
dtype=object)
return rvalues
def get_values(self, dtype=None):
# return object dtypes as Timedelta
if dtype == object:
return lib.map_infer(self.values.ravel(), lib.Timedelta
).reshape(self.values.shape)
return self.values
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, np.integer)
return isinstance(element, (int, bool))
def _try_cast(self, element):
try:
return bool(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super(BoolBlock, self).replace(to_replace, value,
inplace=inplace, filter=filter,
regex=regex)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, ndim=2, fastpath=False,
placement=None):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
super(ObjectBlock, self).__init__(values, ndim=ndim,
fastpath=fastpath,
placement=placement)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.ravel())
def convert(self, datetime=True, numeric=True, timedelta=True, coerce=False,
copy=True, by_item=True):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
# attempt to create new type blocks
blocks = []
if by_item and not self._is_single_block:
for i, rl in enumerate(self.mgr_locs):
values = self.iget(i)
values = com._possibly_convert_objects(
values.ravel(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy
).reshape(values.shape)
values = _block_shape(values, ndim=self.ndim)
newb = make_block(values,
ndim=self.ndim, placement=[rl])
blocks.append(newb)
else:
values = com._possibly_convert_objects(
self.values.ravel(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy
).reshape(self.values.shape)
blocks.append(make_block(values,
ndim=self.ndim, placement=self.mgr_locs))
return blocks
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
# GH6026
if check:
try:
if (self.values[locs] == values).all():
return
except:
pass
try:
self.values[locs] = values
except (ValueError):
# broadcasting error
# see GH6171
new_shape = list(values.shape)
new_shape[0] = len(self.items)
self.values = np.empty(tuple(new_shape),dtype=self.dtype)
self.values.fill(np.nan)
self.values[locs] = values
def _maybe_downcast(self, blocks, downcast=None):
if downcast is not None:
return blocks
# split and convert the blocks
result_blocks = []
for blk in blocks:
result_blocks.extend(blk.convert(datetime=True,
numeric=False))
return result_blocks
def _can_hold_element(self, element):
return True
def _try_cast(self, element):
return element
def should_store(self, value):
return not (issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_)) or com.is_categorical_dtype(value))
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
blk = [self]
to_rep_is_list = com.is_list_like(to_replace)
value_is_list = com.is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
if not either_list and com.is_re(to_replace):
blk[0], = blk[0]._replace_single(to_replace, value,
inplace=inplace, filter=filter,
regex=True)
elif not (either_list or regex):
blk = super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex)
elif both_lists:
for to_rep, v in zip(to_replace, value):
blk[0], = blk[0]._replace_single(to_rep, v, inplace=inplace,
filter=filter, regex=regex)
elif to_rep_is_list and regex:
for to_rep in to_replace:
blk[0], = blk[0]._replace_single(to_rep, value,
inplace=inplace,
filter=filter, regex=regex)
else:
blk[0], = blk[0]._replace_single(to_replace, value,
inplace=inplace, filter=filter,
regex=regex)
return blk
def _replace_single(self, to_replace, value, inplace=False, filter=None,
regex=False):
# to_replace is regex compilable
to_rep_re = regex and com.is_re_compilable(to_replace)
# regex is regex compilable
regex_re = com.is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError('only one of to_replace and regex can be '
'regex compilable')
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replace = regex
regex = regex_re or to_rep_re
# try to get the pattern attribute (compiled re) or it's a string
try:
pattern = to_replace.pattern
except AttributeError:
pattern = to_replace
# if the pattern is not empty and to_replace is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replace)
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
result = super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter,
regex=regex)
if not isinstance(result, list):
result = [result]
return result
new_values = self.values if inplace else self.values.copy()
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isnull(value) or not isinstance(value, compat.string_types):
def re_replacer(s):
try:
return value if rx.search(s) is not None else s
except TypeError:
return s
else:
# value is guaranteed to be a string here, s can be either a string
# or null; if it's null it gets returned
def re_replacer(s):
try:
return rx.sub(value, s)
except TypeError:
return s
f = np.vectorize(re_replacer, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.isin(filter).nonzero()[0]
new_values[filt] = f(new_values[filt])
return [self if inplace else
make_block(new_values,
fastpath=True, placement=self.mgr_locs)]
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
is_categorical = True
_can_hold_na = True
_holder = Categorical
def __init__(self, values, placement,
fastpath=False, **kwargs):
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(maybe_to_categorical(values),
fastpath=True, placement=placement,
**kwargs)
@property
def is_view(self):
""" I am never a view """
return False
def to_dense(self):
return self.values.to_dense().view()
def convert(self, copy=True, **kwargs):
return [self.copy() if copy else self]
@property
def shape(self):
return (len(self.mgr_locs), len(self.values))
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return np.object_
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
return self.values._slice(slicer)
def fillna(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
return [self.make_block_same_class(values=values.fillna(value=value,
limit=limit),
placement=self.mgr_locs)]
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = self.values if inplace else self.values.copy()
return self.make_block_same_class(values=values.fillna(fill_value=fill_value,
method=method,
limit=limit),
placement=self.mgr_locs)
def shift(self, periods, axis=0):
return self.make_block_same_class(values=self.values.shift(periods),
placement=self.mgr_locs)
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
# axis doesn't matter; we are really a single-dim object
        # but are passed the axis depending on the calling routine
        # if it's REALLY axis 0, then this will be a reindex and not a take
new_values = self.values.take_nd(indexer, fill_value=fill_value)
# if we are a 1-dim object, then always place at 0
if self.ndim == 1:
new_mgr_locs = [0]
else:
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
new_values[mask] = new
return [self.make_block_same_class(values=new_values, placement=self.mgr_locs)]
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None):
"""
Coerce to the new type (if copy=True, return a new copy)
        raise if an error occurs and raise_on_error is True
"""
if self.is_categorical_astype(dtype):
values = self.values
else:
values = np.asarray(self.values).astype(dtype, copy=False)
if copy:
values = values.copy()
return make_block(values,
ndim=self.ndim,
placement=self.mgr_locs)
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = isnull(values)
values = np.array(values, dtype='object')
values[mask] = na_rep
# we are expected to return a 2-d ndarray
        return values.reshape(1, len(values))
class DatetimeBlock(Block):
__slots__ = ()
is_datetime = True
_can_hold_na = True
def __init__(self, values, placement,
fastpath=False, **kwargs):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
super(DatetimeBlock, self).__init__(values,
fastpath=True, placement=placement,
**kwargs)
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return element.dtype == _NS_DTYPE or element.dtype == np.int64
return (com.is_integer(element) or
isinstance(element, datetime) or
isnull(element))
def _try_cast(self, element):
try:
return int(element)
except:
return element
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_args(self, values, other):
""" Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smallest i8, and will correctly round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be """
values = values.view('i8')
if is_null_datelike_scalar(other):
other = tslib.iNaT
elif isinstance(other, datetime):
other = lib.Timestamp(other).asm8.view('i8')
elif hasattr(other, 'dtype') and com.is_integer_dtype(other):
other = other.view('i8')
else:
other = np.array(other, dtype='i8')
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('M8[ns]')
elif isinstance(result, (np.integer, np.datetime64)):
result = lib.Timestamp(result)
return result
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
value = tslib.iNaT
return value
def fillna(self, value, limit=None,
inplace=False, downcast=None):
# straight putmask here
values = self.values if inplace else self.values.copy()
mask = isnull(self.values)
value = self._try_fill(value)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
            mask[mask.cumsum(self.ndim - 1) > limit] = False
np.putmask(values, mask, value)
return [self if inplace else
make_block(values,
fastpath=True, placement=self.mgr_locs)]
def to_native_types(self, slicer=None, na_rep=None, date_format=None,
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
from pandas.core.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(values, date_format)
result = tslib.format_array_from_datetime(values.view('i8').ravel(),
tz=None,
format=format,
na_rep=na_rep).reshape(values.shape)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.datetime64)
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
if values.dtype != _NS_DTYPE:
# Workaround for numpy 1.6 bug
values = tslib.cast_to_nanoseconds(values)
self.values[locs] = values
def get_values(self, dtype=None):
# return object dtype as Timestamps
if dtype == object:
return lib.map_infer(self.values.ravel(), lib.Timestamp)\
.reshape(self.values.shape)
return self.values
class SparseBlock(NonConsolidatableMixIn, Block):
""" implement as a list of sparse arrays of the same dtype """
__slots__ = ()
is_sparse = True
is_numeric = True
_can_hold_na = True
_ftype = 'sparse'
_holder = SparseArray
@property
def shape(self):
return (len(self.mgr_locs), self.sp_index.length)
@property
def itemsize(self):
return self.dtype.itemsize
@property
def fill_value(self):
#return np.nan
return self.values.fill_value
@fill_value.setter
def fill_value(self, v):
# we may need to upcast our fill to match our dtype
if issubclass(self.dtype.type, np.floating):
v = float(v)
self.values.fill_value = v
@property
def sp_values(self):
return self.values.sp_values
@sp_values.setter
def sp_values(self, v):
# reset the sparse values
self.values = SparseArray(v, sparse_index=self.sp_index,
kind=self.kind, dtype=v.dtype,
fill_value=self.values.fill_value,
copy=False)
@property
def sp_index(self):
return self.values.sp_index
@property
def kind(self):
return self.values.kind
def __len__(self):
try:
return self.sp_index.length
except:
return 0
def copy(self, deep=True):
return self.make_block_same_class(values=self.values,
sparse_index=self.sp_index,
kind=self.kind, copy=deep,
placement=self.mgr_locs)
def make_block_same_class(self, values, placement,
sparse_index=None, kind=None, dtype=None,
fill_value=None, copy=False, fastpath=True):
""" return a new block """
if dtype is None:
dtype = self.dtype
if fill_value is None:
fill_value = self.values.fill_value
# if not isinstance(values, SparseArray) and values.ndim != self.ndim:
# raise ValueError("ndim mismatch")
if values.ndim == 2:
nitems = values.shape[0]
if nitems == 0:
# kludgy, but SparseBlocks cannot handle slices, where the
# output is 0-item, so let's convert it to a dense block: it
# won't take space since there's 0 items, plus it will preserve
# the dtype.
return make_block(np.empty(values.shape, dtype=dtype),
placement, fastpath=True,)
elif nitems > 1:
raise ValueError("Only 1-item 2d sparse blocks are supported")
else:
values = values.reshape(values.shape[1])
new_values = SparseArray(values, sparse_index=sparse_index,
kind=kind or self.kind, dtype=dtype,
fill_value=fill_value, copy=copy)
return make_block(new_values, ndim=self.ndim,
fastpath=fastpath, placement=placement)
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = com.interpolate_2d(
self.values.to_dense(), method, axis, limit, fill_value)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
def fillna(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
if issubclass(self.dtype.type, np.floating):
value = float(value)
values = self.values if inplace else self.values.copy()
return [self.make_block_same_class(values=values.get_values(value),
fill_value=value,
placement=self.mgr_locs)]
def shift(self, periods, axis=0):
""" shift the block by periods """
N = len(self.values.T)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
new_values = self.values.to_dense().take(indexer)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(new_values)
if periods > 0:
new_values[:periods] = fill_value
else:
new_values[periods:] = fill_value
return [self.make_block_same_class(new_values, placement=self.mgr_locs)]
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
# taking on the 0th axis always here
if fill_value is None:
fill_value = self.fill_value
return self.make_block_same_class(self.values.take(indexer),
fill_value=fill_value,
placement=self.mgr_locs)
def sparse_reindex(self, new_index):
""" sparse reindex and return a new block
current reindex only works for float64 dtype! """
values = self.values
values = values.sp_index.to_int_index().reindex(
values.sp_values.astype('float64'), values.fill_value, new_index)
return self.make_block_same_class(values, sparse_index=new_index,
placement=self.mgr_locs)
def make_block(values, placement, klass=None, ndim=None,
dtype=None, fastpath=False):
if klass is None:
dtype = dtype or values.dtype
vtype = dtype.type
if isinstance(values, SparseArray):
klass = SparseBlock
elif issubclass(vtype, np.floating):
klass = FloatBlock
elif (issubclass(vtype, np.integer) and
issubclass(vtype, np.timedelta64)):
klass = TimeDeltaBlock
elif (issubclass(vtype, np.integer) and
not issubclass(vtype, np.datetime64)):
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
elif issubclass(vtype, np.datetime64):
klass = DatetimeBlock
elif issubclass(vtype, np.complexfloating):
klass = ComplexBlock
elif is_categorical(values):
klass = CategoricalBlock
else:
klass = ObjectBlock
return klass(values, ndim=ndim, fastpath=fastpath,
placement=placement)
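# Illustrative sketch (hypothetical values; doctest-style comment, not executed):
# make_block infers the Block subclass from the values' dtype when klass is not
# supplied, e.g. a float64 ndarray becomes a FloatBlock.
#
#   >>> vals = np.zeros((1, 3), dtype='float64')
#   >>> type(make_block(vals, placement=slice(0, 1))).__name__
#   'FloatBlock'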
# TODO: flexible with index=None and/or items=None
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_ftype_counts
get_dtypes
get_ftypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
get_scalar(label_tup)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated',
'_is_consolidated', '_blknos', '_blklocs']
def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks)
for block in blocks:
if block.is_sparse:
if len(block.mgr_locs) != 1:
raise AssertionError("Sparse block refers to multiple items")
else:
if self.ndim != block.ndim:
raise AssertionError(('Number of Block dimensions (%d) must '
'equal number of axes (%d)')
% (block.ndim, self.ndim))
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [_ensure_index([])] + [
_ensure_index(a) for a in self.axes[1:]
]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self):
return len(self.axes)
def set_axis(self, axis, new_labels):
new_labels = _ensure_index(new_labels)
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError('Length mismatch: Expected axis has %d elements, '
'new values have %d elements' % (old_len, new_len))
self.axes[axis] = new_labels
def rename_axis(self, mapper, axis, copy=True):
"""
Rename one of axes.
Parameters
----------
mapper : unary callable
axis : int
copy : boolean, default True
"""
obj = self.copy(deep=copy)
obj.set_axis(axis, _transform_index(self.axes[axis], mapper))
return obj
def add_prefix(self, prefix):
f = (str(prefix) + '%s').__mod__
return self.rename_axis(f, axis=0)
def add_suffix(self, suffix):
f = ('%s' + str(suffix)).__mod__
return self.rename_axis(f, axis=0)
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if len(self.blocks) != 1:
return False
blk = self.blocks[0]
return (blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice == slice(0, len(self), 1))
def _rebuild_blknos_and_blklocs(self):
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
# make items read only for now
def _get_items(self):
return self.axes[0]
items = property(fget=_get_items)
def _get_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.get(v, 0) + b.shape[0]
return counts
def get_dtype_counts(self):
return self._get_counts(lambda b: b.dtype.name)
def get_ftype_counts(self):
return self._get_counts(lambda b: b.ftype)
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return com.take_1d(dtypes, self._blknos, allow_fill=False)
def get_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return com.take_1d(ftypes, self._blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = [ax for ax in self.axes]
extra_state = {
'0.14.1': {
'axes': axes_array,
'blocks': [dict(values=b.values,
mgr_locs=b.mgr_locs.indexer)
for b in self.blocks]
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
# numpy < 1.7 pickle compat
if values.dtype == 'M8[us]':
values = values.astype('M8[ns]')
return make_block(values, placement=mgr_locs)
if (isinstance(state, tuple) and len(state) >= 4
and '0.14.1' in state[3]):
state = state[3]['0.14.1']
self.axes = [_ensure_index(ax) for ax in state['axes']]
self.blocks = tuple(
unpickle_block(b['values'], b['mgr_locs'])
for b in state['blocks'])
else:
# discard anything after 3rd, support beta pickling format for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workaround for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-unique
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the assumption that
# block items corresponded to manager items 1-to-1.
all_mgr_locs = [slice(0, len(bitems[0]))]
else:
all_mgr_locs = [self.axes[0].get_indexer(blk_items)
for blk_items in bitems]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, all_mgr_locs))
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self):
return len(self.items)
def __unicode__(self):
output = com.pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += u('\nItems: %s') % ax
else:
output += u('\nAxis %d: %s') % (i, ax)
for block in self.blocks:
output += u('\n%s') % com.pprint_thing(block)
return output
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if not block.is_sparse and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
'block items\n# manager items: {0}, # '
'tot_items: {1}'.format(len(self.items),
tot_items))
def apply(self, f, axes=None, filter=None, do_integrity_check=False, **kwargs):
"""
iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the callable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only call the block if the filter is in
the block
do_integrity_check : boolean, default False. Do the block manager integrity check
Returns
-------
Block Manager (new object)
"""
result_blocks = []
# filter kwarg is used in replace-* family of methods
if filter is not None:
filter_locs = set(self.items.get_indexer_for(filter))
if len(filter_locs) == len(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs['filter'] = filter_locs
if f == 'where' and kwargs.get('align', True):
align_copy = True
align_keys = ['other', 'cond']
elif f == 'putmask' and kwargs.get('align', True):
align_copy = False
align_keys = ['new', 'mask']
elif f == 'eval':
align_copy = False
align_keys = ['other']
elif f == 'fillna':
# fillna internally does putmask, maybe it's better to do this
# at mgr, not block level?
align_copy = False
align_keys = ['value']
else:
align_keys = []
aligned_args = dict((k, kwargs[k]) for k in align_keys
if hasattr(kwargs[k], 'reindex_axis'))
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.isin(filter_locs).any():
result_blocks.append(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = getattr(obj, '_info_axis_number', 0)
kwargs[k] = obj.reindex_axis(b_items, axis=axis,
copy=align_copy)
applied = getattr(b, f)(**kwargs)
if isinstance(applied, list):
result_blocks.extend(applied)
else:
result_blocks.append(applied)
if len(result_blocks) == 0:
return self.make_empty(axes or self.axes)
bm = self.__class__(result_blocks, axes or self.axes,
do_integrity_check=do_integrity_check)
bm._consolidate_inplace()
return bm
def isnull(self, **kwargs):
return self.apply('apply', **kwargs)
def where(self, **kwargs):
return self.apply('where', **kwargs)
def eval(self, **kwargs):
return self.apply('eval', **kwargs)
def setitem(self, **kwargs):
return self.apply('setitem', **kwargs)
def putmask(self, **kwargs):
return self.apply('putmask', **kwargs)
def diff(self, **kwargs):
return self.apply('diff', **kwargs)
def interpolate(self, **kwargs):
return self.apply('interpolate', **kwargs)
def shift(self, **kwargs):
return self.apply('shift', **kwargs)
def fillna(self, **kwargs):
return self.apply('fillna', **kwargs)
def downcast(self, **kwargs):
return self.apply('downcast', **kwargs)
def astype(self, dtype, **kwargs):
return self.apply('astype', dtype=dtype, **kwargs)
def convert(self, **kwargs):
return self.apply('convert', **kwargs)
def replace(self, **kwargs):
return self.apply('replace', **kwargs)
def replace_list(self, src_list, dest_list, inplace=False, regex=False):
""" do a list replace """
# figure out our mask a-priori to avoid repeated replacements
values = self.as_matrix()
def comp(s):
if isnull(s):
return isnull(values)
return _possibly_compare(values, getattr(s, 'asm8', s),
operator.eq)
masks = [comp(s) for i, s in enumerate(src_list)]
result_blocks = []
for blk in self.blocks:
            # it's possible to get multiple result blocks here
# replace ALWAYS will return a list
rb = [blk if inplace else blk.copy()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
new_rb = []
for b in rb:
if b.dtype == np.object_:
result = b.replace(s, d, inplace=inplace,
regex=regex)
if isinstance(result, list):
new_rb.extend(result)
else:
new_rb.append(result)
else:
# get our mask for this element, sized to this
# particular block
m = masks[i][b.mgr_locs.indexer]
if m.any():
new_rb.extend(b.putmask(m, d, inplace=True))
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
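    # Illustrative sketch (hypothetical frame; not executed): DataFrame.replace
    # with list arguments routes here, e.g. df._data.replace_list(['a', 'b'],
    # ['x', 'y']) builds one boolean mask per source value up front and then
    # rewrites block by block, using Block.replace for object blocks and
    # putmask for everything else.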
def reshape_nd(self, axes, **kwargs):
""" a 2d-nd reshape operation on a BlockManager """
return self.apply('reshape_nd', axes=axes, **kwargs)
def is_consolidated(self):
"""
        Return True if the blocks are consolidated (no two blocks share an ftype)
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = len(ftypes) == len(set(ftypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return len(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return all([block.is_numeric for block in self.blocks])
@property
def is_datelike_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return any([block.is_datelike for block in self.blocks])
@property
def is_view(self):
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
return self.blocks[0].is_view
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def get_bool_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy)
def get_numeric_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], copy)
def combine(self, blocks, copy=True):
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_items = self.items.take(indexer)
new_blocks = []
for b in blocks:
b = b.copy(deep=copy)
b.mgr_locs = com.take_1d(inv_indexer, b.mgr_locs.as_array, axis=0,
allow_fill=False)
new_blocks.append(b)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(new_blocks, new_axes, do_integrity_check=False)
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
else:
slicer = [slice(None)] * (axis + 1)
slicer[axis] = slobj
slicer = tuple(slicer)
new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = self.__class__(new_blocks, new_axes, do_integrity_check=False,
fastpath=True)
bm._consolidate_inplace()
return bm
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return len(self.blocks)
def copy(self, deep=True):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
        deep : boolean or string, default True
If False, return shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
copy : BlockManager
"""
# this preserves the notion of view copying of axes
if deep:
if deep == 'all':
copy = lambda ax: ax.copy(deep=True)
else:
copy = lambda ax: ax.view()
new_axes = [ copy(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
return self.apply('copy', axes=new_axes, deep=deep,
do_integrity_check=False)
def as_matrix(self, items=None):
if len(self.blocks) == 0:
return np.empty(self.shape, dtype=float)
if items is not None:
mgr = self.reindex_axis(items, axis=0)
else:
mgr = self
if self._is_single_block or not self.is_mixed_type:
return mgr.blocks[0].get_values()
else:
return mgr._interleave()
def _interleave(self):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
result = np.empty(self.shape, dtype=dtype)
if result.shape[0] == 0:
# Workaround for numpy 1.7 bug:
#
# >>> a = np.empty((0,10))
# >>> a[slice(0,0)]
# array([], shape=(0, 10), dtype=float64)
# >>> a[[]]
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# IndexError: index 0 is out of bounds for axis 0 with size 0
return result
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
result[rl.indexer] = blk.get_values(dtype)
itemmask[rl.indexer] = 1
if not itemmask.all():
raise AssertionError('Some items were not contained in blocks')
return result
def xs(self, key, axis=1, copy=True, takeable=False):
if axis < 1:
raise AssertionError('Can only take xs across axis >= 1, got %d'
% axis)
# take by position
if takeable:
loc = key
else:
loc = self.axes[axis].get_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if isinstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if len(self.blocks) > 1:
# we must copy here as we are mixed type
for blk in self.blocks:
newb = make_block(values=blk.values[slicer],
klass=blk.__class__, fastpath=True,
placement=blk.mgr_locs)
new_blocks.append(newb)
elif len(self.blocks) == 1:
block = self.blocks[0]
vals = block.values[slicer]
if copy:
vals = vals.copy()
new_blocks = [make_block(values=vals, placement=block.mgr_locs,
klass=block.__class__, fastpath=True,)]
return self.__class__(new_blocks, new_axes)
def fast_xs(self, loc):
"""
        get a cross-section for a given location in the
        items; handle dups
        return the result; it *could* be a view in the case of a
single block
"""
if len(self.blocks) == 1:
return self.blocks[0].values[:, loc]
items = self.items
# non-unique (GH4726)
if not items.is_unique:
result = self._interleave()
if self.ndim == 2:
result = result.T
return result[loc]
# unique
dtype = _interleaved_dtype(self.blocks)
n = len(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk._try_coerce_result(blk.iget((i, loc)))
return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = self.__class__(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def get(self, item, fastpath=True):
"""
Return values for selected item (ndarray or BlockManager).
"""
if self.items.is_unique:
if not isnull(item):
loc = self.items.get_loc(item)
else:
indexer = np.arange(len(self.items))[isnull(self.items)]
# allow a single nan location indexer
if not np.isscalar(indexer):
if len(indexer) == 1:
loc = indexer.item()
else:
raise ValueError("cannot label index with a null key")
return self.iget(loc, fastpath=fastpath)
else:
if isnull(item):
raise ValueError("cannot label index with a null key")
indexer = self.items.get_indexer_for([item])
return self.reindex_indexer(new_axis=self.items[indexer],
indexer=indexer, axis=0, allow_dups=True)
def iget(self, i, fastpath=True):
"""
Return the data as a SingleBlockManager if fastpath=True and possible
Otherwise return as a ndarray
"""
block = self.blocks[self._blknos[i]]
values = block.iget(self._blklocs[i])
if not fastpath or block.is_sparse or values.ndim != 1:
return values
        # fastpath shortcut for selecting a single-dim from a 2-dim BM
return SingleBlockManager([ block.make_block_same_class(values,
placement=slice(0, len(values)),
ndim=1,
fastpath=True) ],
self.axes[1])
def get_scalar(self, tup):
"""
Retrieve single item
"""
full_loc = list(ax.get_loc(x)
for ax, x in zip(self.axes, tup))
blk = self.blocks[self._blknos[full_loc[0]]]
full_loc[0] = self._blklocs[full_loc[0]]
# FIXME: this may return non-upcasted types?
return blk.values[tuple(full_loc)]
def delete(self, item):
"""
Delete selected item (items if non-unique) in-place.
"""
indexer = self.items.get_loc(item)
is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
is_deleted[indexer] = True
ref_loc_offset = -is_deleted.cumsum()
is_blk_deleted = [False] * len(self.blocks)
if isinstance(indexer, int):
affected_start = indexer
else:
affected_start = is_deleted.nonzero()[0][0]
for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]):
blk = self.blocks[blkno]
bml = blk.mgr_locs
blk_del = is_deleted[bml.indexer].nonzero()[0]
if len(blk_del) == len(bml):
is_blk_deleted[blkno] = True
continue
elif len(blk_del) != 0:
blk.delete(blk_del)
bml = blk.mgr_locs
blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
# FIXME: use Index.delete as soon as it uses fastpath=True
self.axes[0] = self.items[~is_deleted]
self.blocks = tuple(b for blkno, b in enumerate(self.blocks)
if not is_blk_deleted[blkno])
self._shape = None
self._rebuild_blknos_and_blklocs()
def set(self, item, value, check=False):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
if check, then validate that we are not setting the same data in-place
"""
# FIXME: refactor, clearly separate broadcasting & zip-like assignment
# can prob also fix the various if tests for sparse/categorical
value_is_sparse = isinstance(value, SparseArray)
value_is_cat = is_categorical(value)
value_is_nonconsolidatable = value_is_sparse or value_is_cat
if value_is_sparse:
# sparse
assert self.ndim == 2
def value_getitem(placement):
return value
elif value_is_cat:
# categorical
def value_getitem(placement):
return value
else:
if value.ndim == self.ndim - 1:
value = value.reshape((1,) + value.shape)
def value_getitem(placement):
return value
else:
def value_getitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError('Shape of new values must be compatible '
'with manager shape')
try:
loc = self.items.get_loc(item)
except KeyError:
# This item wasn't present, just insert at end
self.insert(len(self.items), item, value)
return
if isinstance(loc, int):
loc = [loc]
blknos = self._blknos[loc]
blklocs = self._blklocs[loc].copy()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in _get_blkno_placements(blknos, len(self.blocks),
group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set(blk_locs, value_getitem(val_locs), check=check)
else:
unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.append(val_locs)
# If all block items are unfit, schedule the block for removal.
if len(val_locs) == len(blk.mgr_locs):
removed_blknos.append(blkno)
else:
self._blklocs[blk.mgr_locs.indexer] = -1
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
if len(removed_blknos):
# Remove blocks & update blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks -
len(removed_blknos))
self._blknos = com.take_1d(new_blknos, self._blknos, axis=0,
allow_fill=False)
self.blocks = tuple(blk for i, blk in enumerate(self.blocks)
if i not in set(removed_blknos))
if unfit_val_locs:
unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
unfit_count = len(unfit_mgr_locs)
new_blocks = []
if value_is_nonconsolidatable:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
new_blocks.extend(
make_block(values=value.copy(), ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1))
for mgr_loc in unfit_mgr_locs)
self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) +
len(self.blocks))
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
new_blocks.append(
make_block(values=value_getitem(unfit_val_items),
ndim=self.ndim, placement=unfit_mgr_locs))
self._blknos[unfit_mgr_locs] = len(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
def insert(self, loc, item, value, allow_duplicates=False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
allow_duplicates: bool
If False, trying to insert non-unique item will raise
"""
if not allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError('cannot insert %s, already exists' % item)
if not isinstance(loc, int):
raise TypeError("loc must be int")
block = make_block(values=value,
ndim=self.ndim,
placement=slice(loc, loc+1))
for blkno, count in _fast_count_smallints(self._blknos[loc:]):
blk = self.blocks[blkno]
if count == len(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.copy()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
if loc == self._blklocs.shape[0]:
# np.append is a lot faster (at least in numpy 1.7.1), let's use it
# if we can.
self._blklocs = np.append(self._blklocs, 0)
self._blknos = np.append(self._blknos, len(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
self.axes[0] = self.items.insert(loc, item)
self.blocks += (block,)
self._shape = None
self._known_consolidated = False
if len(self.blocks) > 100:
self._consolidate_inplace()
def reindex_axis(self, new_index, axis, method=None, limit=None,
fill_value=None, copy=True):
"""
Conform block manager to new index.
"""
new_index = _ensure_index(new_index)
new_index, indexer = self.axes[axis].reindex(
new_index, method=method, limit=limit)
return self.reindex_indexer(new_index, indexer, axis=axis,
fill_value=fill_value, copy=copy)
def reindex_indexer(self, new_axis, indexer, axis, fill_value=None,
allow_dups=False, copy=True):
"""
Parameters
----------
new_axis : Index
        indexer : ndarray of int64 or None
            pandas-style indexer with -1's only
        axis : int
        fill_value : object
        allow_dups : bool
"""
if indexer is None:
if new_axis is self.axes[axis] and not copy:
return self
result = self.copy(deep=copy)
result.axes = list(self.axes)
result.axes[axis] = new_axis
return result
self._consolidate_inplace()
# some axes don't allow reindexing with dups
if not allow_dups:
self.axes[axis]._can_reindex(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(
indexer, fill_tuple=(fill_value,))
else:
new_blocks = [blk.take_nd(indexer, axis=axis,
fill_tuple=(fill_value if fill_value is not None else
blk.fill_value,))
for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axis
return self.__class__(new_blocks, new_axes)
def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
"""
Slice/take blocks along axis=0.
Overloaded for SingleBlock
Returns
-------
new_blocks : list of Block
"""
allow_fill = fill_tuple is not None
sl_type, slobj, sllen = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], allow_fill=allow_fill)
if self._is_single_block:
blk = self.blocks[0]
if sl_type in ('slice', 'mask'):
return [blk.getitem_block(slobj,
new_mgr_locs=slice(0, sllen))]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_tuple[0] is None:
_, fill_value = com._maybe_promote(blk.dtype)
fill_tuple = (fill_value,)
return [blk.take_nd(slobj, axis=0,
new_mgr_locs=slice(0, sllen),
fill_tuple=fill_tuple)]
if sl_type in ('slice', 'mask'):
blknos = self._blknos[slobj]
blklocs = self._blklocs[slobj]
else:
blknos = com.take_1d(self._blknos, slobj, fill_value=-1,
allow_fill=allow_fill)
blklocs = com.take_1d(self._blklocs, slobj, fill_value=-1,
allow_fill=allow_fill)
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
#
# FIXME: mgr_groupby_blknos must return mgr_locs in ascending order,
# pytables serialization will break otherwise.
blocks = []
for blkno, mgr_locs in _get_blkno_placements(blknos, len(self.blocks),
group=True):
if blkno == -1:
# If we've got here, fill_tuple was not None.
fill_value = fill_tuple[0]
blocks.append(self._make_na_block(
placement=mgr_locs, fill_value=fill_value))
else:
blk = self.blocks[blkno]
# Otherwise, slicing along items axis is necessary.
if not blk._can_consolidate:
# A non-consolidatable block, it's easy, because there's only one item
# and each mgr loc is a copy of that single item.
for mgr_loc in mgr_locs:
newblk = blk.copy(deep=True)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.append(newblk)
else:
blocks.append(blk.take_nd(
blklocs[mgr_locs.indexer], axis=0,
new_mgr_locs=mgr_locs, fill_tuple=None))
return blocks
def _make_na_block(self, placement, fill_value=None):
# TODO: infer dtypes other than float64 from fill_value
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = len(placement)
dtype, fill_value = com._infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
def take(self, indexer, axis=1, verify=True, convert=True):
"""
Take items along any axis.
"""
self._consolidate_inplace()
indexer = np.arange(indexer.start, indexer.stop, indexer.step,
dtype='int64') if isinstance(indexer, slice) \
else np.asanyarray(indexer, dtype='int64')
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception('Indices must be nonzero and less than '
'the axis length')
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(new_axis=new_labels, indexer=indexer,
axis=axis, allow_dups=True)
def merge(self, other, lsuffix='', rsuffix=''):
if not self._is_indexed_like(other):
raise AssertionError('Must have same axes to merge managers')
l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix,
right=other.items, rsuffix=rsuffix)
new_items = _concat_indexes([l, r])
new_blocks = [blk.copy(deep=False)
for blk in self.blocks]
offset = self.shape[0]
for blk in other.blocks:
blk = blk.copy(deep=False)
blk.mgr_locs = blk.mgr_locs.add(offset)
new_blocks.append(blk)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(_consolidate(new_blocks), new_axes)
def _is_indexed_like(self, other):
"""
Check all axes except items
"""
if self.ndim != other.ndim:
raise AssertionError(('Number of dimensions must agree '
'got %d and %d') % (self.ndim, other.ndim))
for ax, oax in zip(self.axes[1:], other.axes[1:]):
if not ax.equals(oax):
return False
return True
def equals(self, other):
self_axes, other_axes = self.axes, other.axes
if len(self_axes) != len(other_axes):
return False
        if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
self._consolidate_inplace()
other._consolidate_inplace()
if len(self.blocks) != len(other.blocks):
return False
# canonicalize block order, using a tuple combining the type
# name and then mgr_locs because there might be unconsolidated
# blocks (say, Categorical) which can only be distinguished by
# the iteration order
def canonicalize(block):
return (block.dtype.name, block.mgr_locs.as_array.tolist())
self_blocks = sorted(self.blocks, key=canonicalize)
other_blocks = sorted(other.blocks, key=canonicalize)
return all(block.equals(oblock) for block, oblock in
zip(self_blocks, other_blocks))
class SingleBlockManager(BlockManager):
""" manage a single block with """
ndim = 1
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
def __init__(self, block, axis, do_integrity_check=False, fastpath=False):
if isinstance(axis, list):
if len(axis) != 1:
raise ValueError(
"cannot create SingleBlockManager with more than 1 axis")
axis = axis[0]
# passed from constructor, single block, single axis
if fastpath:
self.axes = [axis]
if isinstance(block, list):
# empty block
if len(block) == 0:
block = [np.array([])]
elif len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
else:
self.axes = [_ensure_index(axis)]
# create the block here
if isinstance(block, list):
# provide consolidation to the interleaved_dtype
if len(block) > 1:
dtype = _interleaved_dtype(block)
block = [b.astype(dtype) for b in block]
block = _consolidate(block)
if len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
if not isinstance(block, Block):
block = make_block(block,
placement=slice(0, len(axis)),
ndim=1, fastpath=True)
self.blocks = [block]
def _post_setstate(self):
pass
@property
def _block(self):
return self.blocks[0]
@property
def _values(self):
return self._block.values
def reindex(self, new_axis, indexer=None, method=None, fill_value=None,
limit=None, copy=True):
# if we are the same and don't copy, just return
if self.index.equals(new_axis):
if copy:
return self.copy(deep=True)
else:
return self
values = self._block.get_values()
if indexer is None:
indexer = self.items.get_indexer_for(new_axis)
if fill_value is None:
# FIXME: is fill_value used correctly in sparse blocks?
if not self._block.is_sparse:
fill_value = self._block.fill_value
else:
fill_value = np.nan
new_values = com.take_1d(values, indexer,
fill_value=fill_value)
# fill if needed
if method is not None or limit is not None:
new_values = com.interpolate_2d(new_values, method=method,
limit=limit, fill_value=fill_value)
if self._block.is_sparse:
make_block = self._block.make_block_same_class
block = make_block(new_values, copy=copy,
placement=slice(0, len(new_axis)))
mgr = SingleBlockManager(block, new_axis)
mgr._consolidate_inplace()
return mgr
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
return self.__class__(self._block._slice(slobj),
self.index[slobj], fastpath=True)
@property
def index(self):
return self.axes[0]
def convert(self, **kwargs):
""" convert the whole block as one """
kwargs['by_item'] = False
return self.apply('convert', **kwargs)
@property
def dtype(self):
return self._values.dtype
@property
def array_dtype(self):
return self._block.array_dtype
@property
def ftype(self):
return self._block.ftype
def get_dtype_counts(self):
return {self.dtype.name: 1}
def get_ftype_counts(self):
return {self.ftype: 1}
def get_dtypes(self):
return np.array([self._block.dtype])
def get_ftypes(self):
return np.array([self._block.ftype])
@property
def values(self):
return self._values.view()
def get_values(self):
""" return a dense type view """
        return np.array(self._block.to_dense(), copy=False)
@property
def itemsize(self):
return self._values.itemsize
@property
def _can_hold_na(self):
return self._block._can_hold_na
def is_consolidated(self):
return True
def _consolidate_check(self):
pass
def _consolidate_inplace(self):
pass
def delete(self, item):
"""
Delete single item from SingleBlockManager.
Ensures that self.blocks doesn't become empty.
"""
loc = self.items.get_loc(item)
self._block.delete(loc)
self.axes[0] = self.axes[0].delete(loc)
def fast_xs(self, loc):
"""
fast path for getting a cross-section
return a view of the data
"""
return self._block.values[loc]
def construction_error(tot_items, block_shape, axes, e=None):
""" raise a helpful message about our construction """
passed = tuple(map(int, [tot_items] + list(block_shape)))
implied = tuple(map(int, [len(ax) for ax in axes]))
if passed == implied and e is not None:
raise e
raise ValueError("Shape of passed values is {0}, indices imply {1}".format(
        passed, implied))
def create_block_manager_from_blocks(blocks, axes):
try:
if len(blocks) == 1 and not isinstance(blocks[0], Block):
# if blocks[0] is of length 0, return empty blocks
if not len(blocks[0]):
blocks = []
else:
# It's OK if a single block is passed as values, its placement is
# basically "all items", but if there're many, don't bother
# converting, it's an error anyway.
blocks = [make_block(values=blocks[0],
placement=slice(0, len(axes[0])))]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
blocks = [getattr(b, 'values', b) for b in blocks]
tot_items = sum(b.shape[0] for b in blocks)
construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
try:
blocks = form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
construction_error(len(arrays), arrays[0].shape, axes, e)
def form_blocks(arrays, names, axes):
# put "leftover" items in float bucket, where else?
# generalize?
float_items = []
complex_items = []
int_items = []
bool_items = []
object_items = []
sparse_items = []
datetime_items = []
cat_items = []
extra_locs = []
names_idx = Index(names)
if names_idx.equals(axes[0]):
names_indexer = np.arange(len(names_idx))
else:
assert names_idx.intersection(axes[0]).is_unique
names_indexer = names_idx.get_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.append(i)
continue
k = names[name_idx]
v = arrays[name_idx]
if isinstance(v, (SparseArray, ABCSparseSeries)):
sparse_items.append((i, k, v))
elif issubclass(v.dtype.type, np.floating):
float_items.append((i, k, v))
elif issubclass(v.dtype.type, np.complexfloating):
complex_items.append((i, k, v))
elif issubclass(v.dtype.type, np.datetime64):
if v.dtype != _NS_DTYPE:
v = tslib.cast_to_nanoseconds(v)
if hasattr(v, 'tz') and v.tz is not None:
object_items.append((i, k, v))
else:
datetime_items.append((i, k, v))
elif issubclass(v.dtype.type, np.integer):
if v.dtype == np.uint64:
# HACK #2355 definite overflow
if (v > 2 ** 63 - 1).any():
object_items.append((i, k, v))
continue
int_items.append((i, k, v))
elif v.dtype == np.bool_:
bool_items.append((i, k, v))
elif is_categorical(v):
cat_items.append((i, k, v))
else:
object_items.append((i, k, v))
blocks = []
if len(float_items):
float_blocks = _multi_blockify(float_items)
blocks.extend(float_blocks)
if len(complex_items):
complex_blocks = _simple_blockify(
complex_items, np.complex128)
blocks.extend(complex_blocks)
if len(int_items):
int_blocks = _multi_blockify(int_items)
blocks.extend(int_blocks)
if len(datetime_items):
datetime_blocks = _simple_blockify(
datetime_items, _NS_DTYPE)
blocks.extend(datetime_blocks)
if len(bool_items):
bool_blocks = _simple_blockify(
bool_items, np.bool_)
blocks.extend(bool_blocks)
if len(object_items) > 0:
object_blocks = _simple_blockify(
object_items, np.object_)
blocks.extend(object_blocks)
if len(sparse_items) > 0:
sparse_blocks = _sparse_blockify(sparse_items)
blocks.extend(sparse_blocks)
if len(cat_items) > 0:
cat_blocks = [ make_block(array,
klass=CategoricalBlock,
fastpath=True,
placement=[i]
) for i, names, array in cat_items ]
blocks.extend(cat_blocks)
if len(extra_locs):
shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = make_block(block_values, placement=extra_locs)
blocks.append(na_block)
return blocks
def _simple_blockify(tuples, dtype):
""" return a single array of a block that has a single dtype; if dtype is
not None, coerce to this dtype
"""
values, placement = _stack_arrays(tuples, dtype)
# CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
block = make_block(values, placement=placement)
return [block]
def _multi_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes """
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[2].dtype)
new_blocks = []
for dtype, tup_block in grouper:
values, placement = _stack_arrays(
list(tup_block), dtype)
block = make_block(values, placement=placement)
new_blocks.append(block)
return new_blocks
def _sparse_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes (and
are sparse)
"""
new_blocks = []
for i, names, array in tuples:
array = _maybe_to_sparse(array)
block = make_block(
array, klass=SparseBlock, fastpath=True,
placement=[i])
new_blocks.append(block)
return new_blocks
def _stack_arrays(tuples, dtype):
# fml
def _asarray_compat(x):
if isinstance(x, ABCSeries):
return x.values
else:
return np.asarray(x)
def _shape_compat(x):
if isinstance(x, ABCSeries):
return len(x),
else:
return x.shape
placement, names, arrays = zip(*tuples)
first = arrays[0]
shape = (len(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
def _interleaved_dtype(blocks):
if not len(blocks):
return None
counts = defaultdict(lambda: [])
for x in blocks:
counts[type(x)].append(x)
def _lcd_dtype(l):
""" find the lowest dtype that can accomodate the given types """
m = l[0].dtype
for x in l[1:]:
if x.dtype.itemsize > m.itemsize:
m = x.dtype
return m
have_int = len(counts[IntBlock]) > 0
have_bool = len(counts[BoolBlock]) > 0
have_object = len(counts[ObjectBlock]) > 0
have_float = len(counts[FloatBlock]) > 0
have_complex = len(counts[ComplexBlock]) > 0
have_dt64 = len(counts[DatetimeBlock]) > 0
have_td64 = len(counts[TimeDeltaBlock]) > 0
have_cat = len(counts[CategoricalBlock]) > 0
have_sparse = len(counts[SparseBlock]) > 0
have_numeric = have_float or have_complex or have_int
has_non_numeric = have_dt64 or have_td64 or have_cat
if (have_object or
(have_bool and (have_numeric or have_dt64 or have_td64)) or
(have_numeric and has_non_numeric) or
have_cat or
have_dt64 or
have_td64):
return np.dtype(object)
elif have_bool:
return np.dtype(bool)
elif have_int and not have_float and not have_complex:
# if we are mixing unsigned and signed, then return
# the next biggest int type (if we can)
lcd = _lcd_dtype(counts[IntBlock])
kinds = set([i.dtype.kind for i in counts[IntBlock]])
if len(kinds) == 1:
return lcd
if lcd == 'uint64' or lcd == 'int64':
return np.dtype('int64')
        # return the next larger signed int type if unsigned
if lcd.kind == 'u':
return np.dtype('int%s' % (lcd.itemsize * 8 * 2))
return lcd
elif have_complex:
return np.dtype('c16')
else:
return _lcd_dtype(counts[FloatBlock] + counts[SparseBlock])
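# Illustrative sketch of the promotion rules above (not executed): int64 mixed
# with float64 interleaves to float64 (the widest float present); bool mixed
# with any numeric block falls back to object; and datetime64, timedelta64 and
# categorical blocks always interleave to object.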
def _consolidate(blocks):
"""
Merge blocks having same dtype, exclude non-consolidating blocks
"""
# sort by _can_consolidate, dtype
gkey = lambda x: x._consolidate_key
grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)
new_blocks = []
for (_can_consolidate, dtype), group_blocks in grouper:
merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype,
_can_consolidate=_can_consolidate)
if isinstance(merged_blocks, list):
new_blocks.extend(merged_blocks)
else:
new_blocks.append(merged_blocks)
return new_blocks
def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
if len(blocks) == 1:
return blocks[0]
if _can_consolidate:
if dtype is None:
if len(set([b.dtype for b in blocks])) != 1:
raise AssertionError("_merge_blocks are invalid!")
dtype = blocks[0].dtype
# FIXME: optimization potential in case all mgrs contain slices and
# combination of those slices is a slice, too.
new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
new_values = _vstack([b.values for b in blocks], dtype)
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return make_block(new_values,
fastpath=True, placement=new_mgr_locs)
# no merge
return blocks
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim <= ndim:
if shape is None:
shape = values.shape
values = values.reshape(tuple((1,) + shape))
return values
def _vstack(to_stack, dtype):
# work around NumPy 1.6 bug
if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
new_values = np.vstack([x.view('i8') for x in to_stack])
return new_values.view(dtype)
else:
return np.vstack(to_stack)
def _possibly_compare(a, b, op):
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
    # avoid numpy deprecation warning for i8 vs integer comparisons
if is_datetimelike_v_numeric(a, b):
res = False
else:
res = op(a, b)
if np.isscalar(res) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
if is_a_array:
type_names[0] = 'ndarray(dtype=%s)' % a.dtype
if is_b_array:
type_names[1] = 'ndarray(dtype=%s)' % b.dtype
raise TypeError("Cannot compare types %r and %r" % tuple(type_names))
return res
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _block2d_to_blocknd(values, placement, shape, labels, ref_items):
""" pivot to the labels shape """
from pandas.core.internals import make_block
panel_shape = (len(placement),) + shape
# TODO: lexsort depth needs to be 2!!
# Create observation selection vector using major and minor
# labels, for converting to panel format.
selector = _factor_indexer(shape[1:], labels)
mask = np.zeros(np.prod(shape), dtype=bool)
mask.put(selector, True)
if mask.all():
pvalues = np.empty(panel_shape, dtype=values.dtype)
else:
dtype, fill_value = _maybe_promote(values.dtype)
pvalues = np.empty(panel_shape, dtype=dtype)
pvalues.fill(fill_value)
values = values
for i in range(len(placement)):
pvalues[i].flat[mask] = values[:, i]
return make_block(pvalues, placement=placement)
def _factor_indexer(shape, labels):
"""
given a tuple of shape and a list of Categorical labels, return the
expanded label indexer
"""
mult = np.array(shape)[::-1].cumprod()[::-1]
return com._ensure_platform_int(
np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T)
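# Worked example (hypothetical labels; not executed): with shape=(4,) and
# labels=[[0, 1, 2], [1, 2, 3]], mult becomes [4], np.append(mult, [1]) gives
# [4, 1], and the returned indexer is 4*major + minor = [1, 6, 11], i.e.
# row-major positions in an (n_major, 4) grid.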
def _get_blkno_placements(blknos, blk_count, group=True):
"""
Parameters
----------
blknos : array of int64
blk_count : int
group : bool
Returns
-------
iterator
yield (BlockPlacement, blkno)
"""
blknos = com._ensure_int64(blknos)
# FIXME: blk_count is unused, but it may avoid the use of dicts in cython
    for blkno, indexer in lib.get_blkno_indexers(blknos, group):
        yield blkno, BlockPlacement(indexer)
# Copyright 2021 Prayas Energy Group(https://www.prayaspune.org/peg/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from rumi.io import filemanager
from rumi.io import config
from rumi.io import common
from rumi.io import constant
from rumi.io import loaders
from rumi.io import utilities
import logging
import os
import functools
import numpy as np
import itertools
import math
logger = logging.getLogger(__name__)
def load_param(param_name, subfolder):
"""Loader function to be used by yaml framework. do not use this
directly.
"""
filepath = filemanager.find_filepath(param_name, subfolder)
logger.debug(f"Reading {param_name} from file {filepath}")
df = loaders.read_csv(param_name, filepath)
return df
def get_filtered_parameter(param_name):
"""Returns supply parameter at balancing time and balancing area.
This function will do necessary collapsing and expansion of
parameter data. It will do this operation on all float64 columns.
other columns will be treated as categorical.
:param: param_name
:returns: DataFrame
"""
param_data_ = loaders.get_parameter(param_name)
if not isinstance(param_data_, pd.DataFrame) and param_data_ is None:
return param_data_
original_order = [c for c in param_data_.columns]
param_data = utilities.filter_empty(param_data_) # for test data
specs = filemanager.supply_specs()
if param_name in specs:
param_specs = specs[param_name]
folder = param_specs.get("nested")
geographic = param_specs.get("geographic")
time = param_specs.get("time")
if geographic:
param_data = filter_on_geography(
param_data, geographic, folder)
if time:
param_data = filter_on_time(param_data, time, folder)
param_data = preserve_column_order(
param_data, original_order)
return param_data.fillna("")
def preserve_column_order(dataframe, original_order):
class DummyDFColumns:
"""A class to simulate df.columns from pa.DataFrame
"""
def __init__(self, cols):
self.columns = list(cols)
def indexof_geo(oldcols):
subset_cols = utilities.get_geographic_columns_from_dataframe(
oldcols)
return oldcols.columns.index(subset_cols[-1])+1
def indexof_time(oldcols):
subset_cols = utilities.get_time_columns_from_dataframe(oldcols)
return oldcols.columns.index(subset_cols[-1])+1
def extra_geo(dataframe, oldcols):
geo = utilities.get_geographic_columns_from_dataframe(dataframe)
return [c for c in geo if c not in oldcols.columns]
def extra_time(dataframe, oldcols):
time = utilities.get_time_columns_from_dataframe(dataframe)
return [c for c in time if c not in oldcols.columns]
def new_order(dataframe, oldcols):
cols = [c for c in oldcols]
oldcols_ = DummyDFColumns(cols)
if utilities.get_geographic_columns_from_dataframe(oldcols_):
for i, c in enumerate(extra_geo(dataframe, oldcols_),
start=indexof_geo(oldcols_)):
cols.insert(i, c)
oldcols_ = DummyDFColumns(cols)
if utilities.get_time_columns_from_dataframe(oldcols_):
for i, c in enumerate(extra_time(dataframe, oldcols_),
start=indexof_time(oldcols_)):
cols.insert(i, c)
return cols
return dataframe.reindex(columns=new_order(dataframe, original_order))
def filter_empty_columns(data, filtercols):
rows = len(data)
empty = [c for c in filtercols
         if data[c].isnull().sum() == rows or (data[c] == "").sum() == rows]
return data[[c for c in data.columns if c not in empty]]
def filter_empty_geography(data):
"""filter out empty geographic columns"""
return filter_empty_columns(data,
utilities.get_geographic_columns_from_dataframe(data))
def filter_empty_time(data):
"""filter out empty time columns"""
return filter_empty_columns(data,
utilities.get_time_columns_from_dataframe(data))
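# Illustrative sketch (toy data, not part of the original module): columns whose
# values are all NaN or all empty strings are dropped by filter_empty_columns.
def _example_filter_empty_columns():
    toy = pd.DataFrame({"Year": [2021, 2022],
                        "Season": ["", ""],      # entirely empty -> dropped
                        "Value": [1.0, 2.0]})
    # returns the frame with only the "Year" and "Value" columns
    return filter_empty_columns(toy, ["Season", "Year"])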
def finest_geography_from_balancing(entities):
g = [common.get_geographic_columns(
common.balancing_area(e)) for e in entities]
return max(g, key=len)
@functools.lru_cache(maxsize=1)
def get_all_carriers():
carriers = ["PhysicalPrimaryCarriers",
            "PhysicalDerivedCarriers", "NonPhysicalDerivedCarriers"]
allcarriers = []
for carrier in carriers:
    allcarriers.extend(
        list(loaders.get_parameter(carrier)['EnergyCarrier']))
return allcarriers
def finest_time_from_balancing(entities):
t = [common.get_time_columns(common.balancing_time(e)) for e in entities]
return max(t, key=len)
@functools.lru_cache(maxsize=16)
def find_EC(entity, value):
if entity == 'EnergyCarrier':
return value
elif entity == 'EnergyConvTech':
EnergyConvTechnologies = loaders.get_parameter(
'EnergyConvTechnologies')
ect = EnergyConvTechnologies.set_index('EnergyConvTech')
return ect.loc[value]['OutputDEC']
else:
EnergyStorTechnologies = loaders.get_parameter(
'EnergyStorTechnologies')
est = EnergyStorTechnologies.set_index('EnergyStorTech')
return est.loc[value]['StoredEC']
def get_entity_type(folder):
if folder == "Carriers":
return 'EnergyCarrier'
elif folder == "Storage":
return 'EnergyStorTech'
else:
return 'EnergyConvTech'
def filter_on_time(data, granularity, folder):
"""granularity is either 'fine' or 'coarse' and folder is one of 'Carriers',
'Technologies', 'Storage'
"""
entity = get_entity_type(folder)
entities = get_all_carriers()
timecols = finest_time_from_balancing(entities)
dfs = []
if granularity == "fine":
for item in data[entity].unique():
q = f"{entity} == '{item}'"
d = filter_empty_time(data.query(q))
balancing_time = common.balancing_time(find_EC(entity, item))
d = group_by_time(d, balancing_time, timecols)
dfs.append(d)
else:
for item in data[entity].unique():
q = f"{entity} == '{item}'"
d = filter_empty_time(data.query(q))
balancing_time = common.balancing_time(find_EC(entity, item))
d = expand_by_time(d, entity, balancing_time, timecols)
dfs.append(d)
return pd.concat(dfs).reset_index(drop=True)
def get_nontime_columns(d):
return [c for c in d.columns if (not pd.api.types.is_float_dtype(d[c])) and c not in constant.TIME_SLICES]
def group_by_time(d, balancing_time, superset_cols):
timecols_ = common.get_time_columns(balancing_time)
othercols = get_nontime_columns(d)
d = utilities.groupby_time(d.fillna(""), othercols, balancing_time).copy()
rows = len(d)
diff = [c for c in superset_cols if c not in timecols_]
for c in diff:
d[c] = pd.Series([""]*rows, dtype=str, name=c)
return d[superset_cols + [c for c in d.columns if c not in superset_cols]]
def expand_by_time(d, entity, balancing_time, superset_cols):
timecols_ = common.get_time_columns(balancing_time)
label = d[entity].unique()[0]
base = utilities.base_dataframe_time(timecols_,
colname=entity,
val=label).reset_index()
d = d.merge(base, how='left')
rows = len(d)
diff = [c for c in superset_cols if c not in timecols_]
for c in diff:
d[c] = pd.Series([""]*rows, dtype=str, name=c)
return d
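# Illustrative sketch (hypothetical column names, not part of the original
# module): the padding step used by expand_by_time/expand_by_geographic adds
# every superset column missing from the frame as an empty-string column so
# that coarse and fine parameters end up with one common layout.
def _example_pad_superset_columns():
    d = pd.DataFrame({"EnergyCarrier": ["COAL"], "Year": [2025], "value": [1.0]})
    superset_cols = ["Year", "Season", "DayType"]
    for c in [c for c in superset_cols if c not in d.columns]:
        d[c] = pd.Series([""] * len(d), dtype=str, name=c)
    return d  # now carries empty "Season" and "DayType" columns as well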
def filter_on_geography(data, granularity, folder):
"""granularity is either 'fine' or 'coarse' and folder is one of 'Carriers',
'Technologies', 'Storage'
"""
entity = get_entity_type(folder)
entities = get_all_carriers()
geocols = finest_geography_from_balancing(entities)
dfs = []
if granularity == "fine":
for item in data[entity].unique():
q = f"{entity} == '{item}'"
d = filter_empty_geography(data.query(q))
balancing_area = common.balancing_area(find_EC(entity, item))
d = group_by_geographic(d, balancing_area, geocols)
dfs.append(d)
else:
for item in data[entity].unique():
q = f"{entity} == '{item}'"
d = filter_empty_geography(data.query(q))
balancing_area = common.balancing_area(find_EC(entity, item))
d = expand_by_geographic(d, entity, balancing_area, geocols)
dfs.append(d)
return pd.concat(dfs).reset_index(drop=True)
def expand_by_geographic(d, entity, balancing_area, superset_cols):
geocols_ = common.get_geographic_columns(balancing_area)
label = d[entity].unique()[0]
base = utilities.base_dataframe_geography(geocols_,
colname=entity,
val=label).reset_index()
d = d.merge(base, how='left')
rows = len(d)
diff = [c for c in superset_cols if c not in geocols_]
for c in diff:
d[c] = pd.Series([""]*rows, dtype=str, name=c)
return d
def get_nongeographic_columns(d):
"""get non geographics non numeric columns"""
return [c for c in d.columns if (not | pd.api.types.is_float_dtype(d[c]) | pandas.api.types.is_float_dtype |
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, LabelEncoder
from sklearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler, Normalizer
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
class Datasets:
"""Dataset for classification problem"""
def __init__(
self,
data_file="./train.csv",
cat_cols=None,
num_cols=None,
level_cols=None,
label_col=None,
train=True,
):
"""create new copies instead of references"""
self.cat_cols = cat_cols
self.num_cols = num_cols
self.level_cols = level_cols
self.feature_cols = cat_cols + num_cols + level_cols
self.label_col = label_col
self.label_encoder = None
# placeholders; populated later by the split, scaling and encoding helpers
self.feature_train = None
self.target_train = None
self.feature_test = None
self.target_test = None
self.X = None
self.y = None
self.scaler = None
self.one_hot = None
self.train = train
self.data_df = self._create_data_df(data_file)
def _label_encode(self, df, col):
"""label encodes data"""
le = LabelEncoder()
le.fit(df[col])
df[col] = le.transform(df[col])
self.label_encoder = le
return df
def _inverse_label_encode(self, df, col):
"""inverse label encodes data"""
le = self.label_encoder
df[col] = le.inverse_transform(df[col])
def _load_data(self, file):
"""loads csv to pd dataframe"""
return pd.read_csv(file)
# def _create_kfold(self, file):
# """make k fold for data"""
# df = _load_data(file)
# df["kfold"] = -1
# df = df.sample(frac=1).reset_index(drop=True)
# kf = model_selection.StratifiedKFold(
# n_splits=self.kfold, shuffle=False, random_state=24
# )
# for fold, (train_idx, val_idx) in enumerate(kf.split(X=df, y=df.target.values)):
# print(len(train_idx), len(val_idx))
# df.loc[val_idx, "kfold"] = fold
# return df
def _create_data_df(self, data_file, preprocess=True, label_encode=False):
"""loads and encodes train data"""
data = self._load_data(data_file)
if preprocess:
data = self._impute_missing_values(
data, self.cat_cols, self.num_cols, self.level_cols
)
data = self._feature_preprocessing(
data, self.cat_cols, self.num_cols, self.level_cols
)
if label_encode:
self._label_encode(data, self.label_col)
self._split_train_test(data)
return data
def _impute_missing_values(
self, df, categorical_features, numeric_features, level_features
):
"""Imputes the continious columns with median and categorical columns with the mode value"""
imputer_con = SimpleImputer(missing_values=np.nan, strategy="median")
imputer_cat = SimpleImputer(missing_values=np.nan, strategy="most_frequent")
for col in categorical_features + numeric_features + level_features:
if df[col].isnull().sum() > 0:
if col in categorical_features + level_features:
df[col] = imputer_cat.fit_transform(df[col].values.reshape(-1, 1))
elif col in numeric_features:
df[col] = imputer_con.fit_transform(df[col].values.reshape(-1, 1))
return df
def _onehot_encoding(self, df, cat_features):
encoded_features = []
self.one_hot = {}
for feature in cat_features:
oh = OneHotEncoder()
encoded_feat = oh.fit_transform(df[feature].values.reshape(-1, 1)).toarray()
self.one_hot[feature] = oh
n = df[feature].nunique()
cols = ["{}_{}".format(feature, n) for n in range(1, n + 1)]
self.one_hot[str(feature) + "col"] = cols
encoded_df = pd.DataFrame(encoded_feat, columns=cols)
encoded_df.index = df.index
encoded_features.append(encoded_df)
df = pd.concat([df, *encoded_features[:6]], axis=1)
# drop columns after one hot
df.drop(columns=cat_features, inplace=True)
return df
def _onehot_newdata(self, df):
encoded_features = []
for feature in self.cat_cols:
oh = self.one_hot[feature]
encoded_feat = oh.transform(df[feature].values.reshape(-1, 1)).toarray()
self.one_hot[feature] = oh
encoded_df = pd.DataFrame(
encoded_feat, columns=self.one_hot[str(feature) + "col"]
)
encoded_df.index = df.index
encoded_features.append(encoded_df)
df = | pd.concat([df, *encoded_features[:6]], axis=1) | pandas.concat |
import logging, os, sys, pickle, json, time, yaml
from datetime import datetime as dt
import warnings
warnings.filterwarnings('ignore')
from tqdm import tqdm
tqdm.pandas()
import pandas as pd
import geopandas as gpd
from geopandas.plotting import _plot_linestring_collection, _plot_point_collection
import numpy as np
from shapely import geometry, wkt, ops
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib as mpl
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.patches import FancyBboxPatch
from ffsc.pipeline.nodes.utils import V_inv
import networkx as nx
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
import multiprocessing as mp
N_WORKERS=6
def visualise_gpd(params, gdfs, ne, logger):
fig, ax = plt.subplots(1,1,figsize=params['figsize'])
ne.plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*params['vis_colors']['ne']), **params['type_style']['ne'])
for dd in gdfs:
logger.info(f'plotting {dd["type"]} {dd["color_key"]}')
if dd['type']=='lin_asset':
dd['gdf']['len'] = dd['gdf']['geometry'].apply(lambda geom: geom.length)
dd['gdf'] = dd['gdf'][dd['gdf']['len']<345]
dd['gdf'].plot(
ax=ax,
color='#{:02x}{:02x}{:02x}'.format(*params['vis_colors'][dd['color_key']]),
**params['type_style'][dd['type']]
)
plt.savefig(params['path'])
def visualise_assets_simplified_coal(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne):
params['path'] = os.path.join(os.getcwd(),'results','figures','assets_simplified_coal.png')
visualise_assets(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne)
return []
def visualise_assets_simplified_oil(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne):
params['path'] = os.path.join(os.getcwd(),'results','figures','assets_simplified_oil.png')
visualise_assets(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne)
return []
def visualise_assets_simplified_gas(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne):
params['path'] = os.path.join(os.getcwd(),'results','figures','assets_simplified_gas.png')
visualise_assets(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne)
return []
def visualise_assets_coal(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne):
params['path'] = os.path.join(os.getcwd(),'results','figures','assets_coal.png')
visualise_assets(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne)
return []
def visualise_assets_oil(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne):
params['path'] = os.path.join(os.getcwd(),'results','figures','assets_oil.png')
visualise_assets(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne)
return []
def visualise_assets_gas(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne):
params['path'] = os.path.join(os.getcwd(),'results','figures','assets_gas.png')
visualise_assets(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne)
return []
def visualise_all_assets(vis_params):
ne = kedro_catalog.load('ne')
gdfs = {}
gdfs['SHIPPINGROUTE'] = kedro_catalog.load('raw_shippingroutes_data')
gdfs['PORT'] = kedro_catalog.load('raw_ports_data')
gdfs['LNGTERMINAL'] = kedro_catalog.load('raw_lngterminals_data')
gdfs['COALMINE'] = kedro_catalog.load('raw_coalmines_data')
gdfs['OILFIELD'] = kedro_catalog.load('raw_oilfields_data')
gdfs['OILWELL'] = kedro_catalog.load('raw_oilwells_data')
gdfs['REFINERY'] = kedro_catalog.load('raw_processingplants_data')
gdfs['RAILWAY'] = kedro_catalog.load('raw_railways_data')
gdfs['RAILWAY'] = gpd.GeoDataFrame.from_features(gdfs['RAILWAY']['features'])
gdfs['PIPELINE'] = kedro_catalog.load('raw_pipelines_data')
gdfs['PIPELINE'] = gpd.GeoDataFrame.from_features(gdfs['PIPELINE']['features'])
gdfs['CITY'] = kedro_catalog.load('raw_cities_energy_data')
gdfs['CITY']['orig_geom'] = gdfs['CITY']['geom_gj'].apply(lambda el: geometry.shape(el))
gdfs['CITY'] = gdfs['CITY'].set_geometry('orig_geom')
gdfs['POWERSTATION'] = kedro_catalog.load('raw_pipelines_data')
gdfs['POWERSTATION'] = gpd.GeoDataFrame.from_features(gdfs['POWERSTATION']['features'])
fig, ax = plt.subplots(1,1,figsize=(36,36))
ne.boundary.plot(ax=ax,color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['ne']),zorder=0)
gdfs['SHIPPINGROUTE'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['SHIPPINGROUTE']), lw=0.5, zorder=1)
gdfs['PORT'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['PORT']), markersize=5, zorder=1)
gdfs['LNGTERMINAL'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['LNGTERMINAL']), markersize=13, zorder=1)
gdfs['COALMINE'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['COALMINE']), markersize=8, zorder=1)
gdfs['OILFIELD'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['OILFIELD']),alpha=0.5, zorder=1)
gdfs['OILWELL'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['OILWELL']),markersize=5, zorder=1)
gdfs['REFINERY'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['REFINERY']),markersize=5, zorder=1)
gdfs['PIPELINE'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['PIPELINE']),lw=0.3, zorder=1)
gdfs['RAILWAY'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['RAILWAY']),lw=0.5, zorder=1)
gdfs['CITY'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['CITY']), zorder=2)
gdfs['POWERSTATION'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['POWERSTATION']),markersize=8,alpha=0.5, zorder=2)
ax.set_aspect(1.2)
plt.savefig('./all_assets_vis.png', bbox_inches='tight')
def prep_assets(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne):
logger = logging.getLogger('Prep assets')
df_ptassets = pd.concat([refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations])
df_linassets = pd.concat([railways, shippingroutes,pipelines])
### filter all dfs
all_nodes = list(set(df_edges['source'].unique().tolist() + df_edges['target'].unique().tolist()))
#all_nodes = all_nodes + [n+'_B' for n in all_nodes]
# drop the lin assets
logger.info('Dropping nodes not in edges')
df_edges['source_type'] = df_edges['source'].str.split('_').str[0]
df_edges['target_type'] = df_edges['target'].str.split('_').str[0]
for lin_asset in ['RAILWAY','PIPELINE','SHIPPINGROUTE']:
df_edges = df_edges[~((df_edges['source_type']==lin_asset) & (df_edges['target_type']==lin_asset))]
df_edges = df_edges[df_edges['source_type']!='supersource']
# drop any '_B' assets
df_edges['source'] = df_edges['source'].str.replace('_B','')
df_edges['target'] = df_edges['target'].str.replace('_B','')
# join geometries for missing
df_missing_cities = pd.merge(df_missing_cities, df_ptassets[['unique_id','geometry']], how='left',on='unique_id')
#print ('missing')
#print (df_missing_cities)
#print (df_missing_powerstations)
# drop non-nodes
df_ptassets = df_ptassets[df_ptassets['unique_id'].isin(all_nodes)]
df_linassets = df_linassets[df_linassets['START'].isin(all_nodes)]
# map geoms on ptassets
logger.info('mapping geometries')
df_ptassets['geometry'] = df_ptassets['geometry'].apply(wkt.loads)
# do polygon assets
df_ptassets.loc[df_ptassets['unique_id'].str.split('_').str[0]=='OILFIELD','geometry'] = df_ptassets.loc[df_ptassets['unique_id'].str.split('_').str[0]=='OILFIELD','geometry'].apply(lambda el: el.representative_point())
df_ptassets.loc[df_ptassets['unique_id'].str.split('_').str[0]=='CITY','geometry'] = df_ptassets.loc[df_ptassets['unique_id'].str.split('_').str[0]=='CITY','geometry'].apply(lambda el: el.representative_point())
# map geoms on remaining edges
df_edges = pd.merge(df_edges, df_ptassets[['unique_id','geometry']], how='left',left_on='source',right_on='unique_id').rename(columns={'geometry':'geometry_source'}).drop(columns=['unique_id'])
df_edges = pd.merge(df_edges, df_ptassets[['unique_id','geometry']], how='left',left_on='target',right_on='unique_id').rename(columns={'geometry':'geometry_target'}).drop(columns=['unique_id'])
df_edges.loc[df_edges['source_type'].isin(['RAILWAY','PIPELINE','SHIPPINGROUTE']),'geometry_source'] = df_edges.loc[df_edges['source_type'].isin(['RAILWAY','PIPELINE','SHIPPINGROUTE']),'source'].apply(lambda el: geometry.Point([float(cc) for cc in el.split('_')[2:4]]))
df_edges.loc[df_edges['target_type'].isin(['RAILWAY','PIPELINE','SHIPPINGROUTE']),'geometry_target'] = df_edges.loc[df_edges['target_type'].isin(['RAILWAY','PIPELINE','SHIPPINGROUTE']),'target'].apply(lambda el: geometry.Point([float(cc) for cc in el.split('_')[2:4]]))
print ('bork')
print (df_edges.loc[df_edges['target_type'].isin(['RAILWAY','PIPELINE','SHIPPINGROUTE'])])
df_edges['geometry'] = df_edges.apply(lambda row: geometry.LineString([row['geometry_source'],row['geometry_target']]), axis=1)
print ('IDL')
pos_idl = ((df_linassets['START'].str.split('_').str[0]=='SHIPPINGROUTE') &(df_linassets['END'].str.split('_').str[0]=='SHIPPINGROUTE')&(df_linassets['START'].str.split('_').str[2].astype(float)<-175)&(df_linassets['END'].str.split('_').str[2].astype(float)>175))
neg_idl =((df_linassets['START'].str.split('_').str[0]=='SHIPPINGROUTE') &(df_linassets['END'].str.split('_').str[0]=='SHIPPINGROUTE')&(df_linassets['START'].str.split('_').str[2].astype(float)>175)&(df_linassets['END'].str.split('_').str[2].astype(float)<-175))
print (pos_idl.sum(), neg_idl.sum())
# remove IDL from linassets
df_linassets = df_linassets[~pos_idl]
df_linassets = df_linassets[~neg_idl]
# map geoms on linassets (LSS)
df_linassets['start_geometry'] = df_linassets['START'].apply(lambda el: geometry.Point([float(cc) for cc in el.split('_')[2:4]]))
df_linassets['end_geometry'] = df_linassets['END'].apply(lambda el: geometry.Point([float(cc) for cc in el.split('_')[2:4]]))
df_linassets['geometry'] = df_linassets.apply(lambda row: geometry.LineString([row['start_geometry'],row['end_geometry']]),axis=1)
# map geoms on missing
df_missing_cities['geometry'] = df_missing_cities['geometry'].apply(wkt.loads)
df_missing_cities['geometry'] = df_missing_cities['geometry'].apply(lambda el: el.representative_point())
df_missing_powerstations['geometry'] = df_missing_powerstations['geometry'].apply(wkt.loads)
print ('edges')
print (df_edges)
print ('assets')
print (df_ptassets)
print ('linassets')
print (df_linassets)
print ('tuples')
print (set([tuple(el) for el in df_edges[['source_type','target_type']].values.tolist()]))
# get color keys
df_edges['color_key'] = 'FINALMILE'
for kk in ['RAILWAY','PIPELINE','SHIPPINGROUTE']:
df_edges.loc[((df_edges['source_type']==kk) | (df_edges['target_type']==kk)),'color_key'] = kk
df_linassets['color_key'] = df_linassets['START'].str.split('_').str[0]
df_ptassets['color_key'] = df_ptassets['unique_id'].str.split('_').str[0]
return df_edges, df_linassets, df_ptassets, df_missing_cities, df_missing_powerstations
def visualise_assets(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne):
logger=logging.getLogger('Visualising')
df_edges, df_linassets, df_ptassets, df_missing_cities, df_missing_powerstations = prep_assets(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways, shippingroutes, pipelines, ne)
# prep gdfs
logger.info('Prepping geodataframes')
gdfs = []
for kk in df_ptassets['color_key'].unique():
gdfs.append(
{
'gdf':gpd.GeoDataFrame(df_ptassets[df_ptassets['color_key']==kk], geometry='geometry'),
'color_key':kk,
'type':'pt_asset'
}
)
for kk in df_linassets['color_key'].unique():
gdfs.append(
{
'gdf':gpd.GeoDataFrame(df_linassets[df_linassets['color_key']==kk], geometry='geometry'),
'color_key':kk,
'type':'lin_asset'
}
)
for kk in df_edges['color_key'].unique():
gdfs.append(
{
'gdf':gpd.GeoDataFrame(df_edges[df_edges['color_key']==kk], geometry='geometry'),
'color_key':kk,
'type':'edges'
}
)
# missing
gdfs += [
{
'gdf':gpd.GeoDataFrame(df_missing_cities, geometry='geometry'),
'color_key':'MISSING_CITY',
'type':'missing_city',
},
{
'gdf':gpd.GeoDataFrame(df_missing_powerstations, geometry='geometry'),
'color_key':'MISSING_POWERSTATION',
'type':'missing_powerstation',
},
]
params['figsize'] = (72,48)
logger.info('Calling matplotlib')
visualise_gpd(params, gdfs, ne, logger)
return []
def visualise_flow(params, ne, df_flow, df_community_edges, df_community_nodes):
# get carrier
if 'COALMINE' in df_community_nodes['NODETYPE'].unique():
carrier='coal'
carrier_supplytypes = ['COALMINE']
elif 'LNGTERMINAL' in df_community_nodes['NODETYPE'].unique():
carrier='gas'
carrier_supplytypes = ['OILFIELD','OILWELL']
else:
carrier='oil'
carrier_supplytypes = ['OILFIELD','OILWELL']
logger = logging.getLogger(f'visualise flow: {carrier}')
logger.info('prepping DFs')
df_community_nodes = df_community_nodes[~df_community_nodes['NODETYPE'].isin(['RAILWAY','PIPELINE','SHIPPINGROUTE'])]
print ('nodes')
print (df_community_nodes)
df_flow = df_flow.rename(columns={'SOURCE':'source','TARGET':'target'})
df_flow = df_flow.set_index(['source','target'])
print ('df_flow')
print (df_flow)
df_community_edges['source_type'] = df_community_edges['source'].str.split('_').str[0]
df_community_edges['target_type'] = df_community_edges['target'].str.split('_').str[0]
df_community_edges = df_community_edges.set_index(['source','target'])
print ('df edges')
print (df_community_edges)
df_community_edges = pd.merge(df_community_edges, df_flow[['flow']], how='left', left_index=True, right_index=True)
logger.info('mapping geometries')
df_community_edges['geometry'] = df_community_edges['geometry'].apply(wkt.loads)
df_community_nodes['geometry'] = df_community_nodes['geometry'].apply(wkt.loads)
logger.info('doing colors and weights')
#df_colors = pd.DataFrame.from_dict({kk:"#{:02x}{:02x}{:02x}".format(*vv) for kk,vv in params['vis_colors'].items()}, orient='index').rename(columns={0:'hex'})
colormap = {kk:"#{:02x}{:02x}{:02x}".format(*vv) for kk,vv in params['vis_colors'].items()}
df_community_edges['color_key'] = 'FINALMILE'
for kk in ['RAILWAY','PIPELINE','SHIPPINGROUTE']:
df_community_edges.loc[((df_community_edges['source_type']==kk) | (df_community_edges['target_type']==kk)),'color_key'] = kk
df_community_edges['color_hex'] = df_community_edges['color_key'].map(colormap)
df_community_nodes['color_hex'] = df_community_nodes['NODETYPE'].map(colormap)
MIN_EDGE = 1
MAX_EDGE = 10
MIN_NODE = 1
MAX_NODE = 25
df_community_nodes = pd.merge(df_community_nodes, df_flow.reset_index()[['target','flow']], how='left',left_on='NODE',right_on='target')
# do demand and supply separately
df_community_nodes['s'] = (np.log10(df_community_nodes['D']+1) - np.log10(df_community_nodes['D']+1).min())/(np.log10(df_community_nodes['D']+1).max() - np.log10(df_community_nodes['D']+1).min())*(MAX_NODE-MIN_NODE)+MIN_NODE
df_community_nodes['s_flow'] = (np.log10(df_community_nodes['flow']+1) - np.log10(df_community_nodes['D']+1).min())/(np.log10(df_community_nodes['flow']+1).max() - np.log10(df_community_nodes['flow']+1).min())*(MAX_NODE-MIN_NODE)+MIN_NODE
df_community_nodes.loc[df_community_nodes['NODETYPE'].isin(carrier_supplytypes),'s'] = df_community_nodes.loc[df_community_nodes['NODETYPE'].isin(carrier_supplytypes),'s_flow']
#df_community_edges['s'] = (np.log(df_community_edges['flow']+1) - np.log(df_community_edges['flow']+1).min())/(np.log(df_community_edges['flow']+1).max() - np.log(df_community_edges['flow']+1).min())*(MAX_EDGE-MIN_EDGE)+MIN_EDGE
df_community_edges['s'] = (df_community_edges['flow'] - df_community_edges['flow'].min())/(df_community_edges['flow'].max() - df_community_edges['flow'].min())*(MAX_EDGE-MIN_EDGE)+MIN_EDGE
df_community_edges = df_community_edges[df_community_edges['flow']>0]
# get rid of the ones that are super long
df_community_edges['len'] = df_community_edges['geometry'].apply(lambda geom: geom.length)
df_community_edges = df_community_edges[df_community_edges['len']<350]
#cast to gdf
df_community_nodes = gpd.GeoDataFrame(df_community_nodes, geometry='geometry')
df_community_edges = gpd.GeoDataFrame(df_community_edges, geometry='geometry')
fig, ax = plt.subplots(1,1,figsize=(48,60))
ne.plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*params['vis_colors']['ne']), **params['type_style']['ne'])
_plot_point_collection(
ax=ax,
geoms=df_community_nodes['geometry'],
color=df_community_nodes['color_hex'].values.tolist(),
markersize=df_community_nodes['s'].values.tolist()
)
_plot_linestring_collection(
ax=ax,
geoms=df_community_edges['geometry'],
color=df_community_edges['color_hex'].values.tolist(),
linewidth=df_community_edges['s'].values.tolist()
)
ax.set_aspect(1.5)
#ax.set_position([0,0,1,1])
plt.savefig(os.path.join(os.getcwd(),'results','figures',f'flow_{carrier}.png'))
return []
def compare_flow(params, ne, df_flow_bl, df_flow_cf, df_community_edges, df_community_nodes):
# get carrier
if 'COALMINE' in df_community_nodes['NODETYPE'].unique():
carrier='coal'
carrier_supplytypes = ['COALMINE']
elif 'LNGTERMINAL' in df_community_nodes['NODETYPE'].unique():
carrier='gas'
carrier_supplytypes = ['OILFIELD','OILWELL']
else:
carrier='oil'
carrier_supplytypes = ['OILFIELD','OILWELL']
logger = logging.getLogger(f'visualise flow: {carrier}')
writer = logging.getLogger(f'writer_{carrier}')
fh = logging.FileHandler(f'compare_{carrier}.log')
fh.setLevel(logging.INFO)
writer.addHandler(fh)
logger.info('prepping DFs')
df_community_nodes = df_community_nodes[~df_community_nodes['NODETYPE'].isin(['RAILWAY','PIPELINE','SHIPPINGROUTE'])]
df_flow_bl = df_flow_bl.rename(columns={'SOURCE':'source','TARGET':'target'})
df_flow_bl = df_flow_bl.set_index(['source','target'])
df_flow_cf = df_flow_cf.rename(columns={'SOURCE':'source','TARGET':'target'})
df_flow_cf = df_flow_cf.set_index(['source','target'])
df_community_edges['source_type'] = df_community_edges['source'].str.split('_').str[0]
df_community_edges['target_type'] = df_community_edges['target'].str.split('_').str[0]
df_community_edges = df_community_edges.set_index(['source','target'])
print ('edges')
print (df_community_edges)
print ('flow_bl')
print(df_flow_bl)
print ('flow_cf')
print(df_flow_cf)
df_community_edges = | pd.merge(df_community_edges, df_flow_bl[['flow']], how='left', left_index=True, right_index=True) | pandas.merge |
# PointNetVLAD datasets: based on Oxford RobotCar and Inhouse
# Code adapted from PointNetVLAD repo: https://github.com/mikacuy/pointnetvlad
import numpy as np
import os
import pandas as pd
from sklearn.neighbors import KDTree
import pickle
import argparse
import tqdm
from datasets.oxford import TrainingTuple
# Import test set boundaries
from generating_queries.generate_test_sets import P1, P2, P3, P4, check_in_test_set
# Test set boundaries
P = [P1, P2, P3, P4]
RUNS_FOLDER = "oxford/"
FILENAME = "pointcloud_locations_20m_10overlap.csv"
POINTCLOUD_FOLS = "/pointcloud_20m_10overlap/"
def construct_query_dict(df_centroids, base_path, filename, ind_nn_r, ind_r_r=50):
# ind_nn_r: threshold for positive examples
# ind_r_r: threshold for negative examples
# Baseline dataset parameters in the original PointNetVLAD code: ind_nn_r=10, ind_r=50
# Refined dataset parameters in the original PointNetVLAD code: ind_nn_r=12.5, ind_r=50
tree = KDTree(df_centroids[['northing', 'easting']])
ind_nn = tree.query_radius(df_centroids[['northing', 'easting']], r=ind_nn_r)
ind_r = tree.query_radius(df_centroids[['northing', 'easting']], r=ind_r_r)
queries = {}
for anchor_ndx in range(len(ind_nn)):
anchor_pos = np.array(df_centroids.iloc[anchor_ndx][['northing', 'easting']])
query = df_centroids.iloc[anchor_ndx]["file"]
# Extract timestamp from the filename
scan_filename = os.path.split(query)[1]
assert os.path.splitext(scan_filename)[1] == '.bin', f"Expected .bin file: {scan_filename}"
timestamp = int(os.path.splitext(scan_filename)[0])
positives = ind_nn[anchor_ndx]
non_negatives = ind_r[anchor_ndx]
positives = positives[positives != anchor_ndx]
# Sort ascending order
positives = np.sort(positives)
non_negatives = np.sort(non_negatives)
# Tuple(id: int, timestamp: int, rel_scan_filepath: str, positives: List[int], non_negatives: List[int])
queries[anchor_ndx] = TrainingTuple(id=anchor_ndx, timestamp=timestamp, rel_scan_filepath=query,
positives=positives, non_negatives=non_negatives, position=anchor_pos)
file_path = os.path.join(base_path, filename)
with open(file_path, 'wb') as handle:
pickle.dump(queries, handle, protocol=pickle.HIGHEST_PROTOCOL)
print("Done ", filename)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate Baseline training dataset')
parser.add_argument('--dataset_root', type=str, required=True, help='Dataset root folder')
args = parser.parse_args()
print('Dataset root: {}'.format(args.dataset_root))
assert os.path.exists(args.dataset_root), f"Cannot access dataset root folder: {args.dataset_root}"
base_path = args.dataset_root
all_folders = sorted(os.listdir(os.path.join(base_path, RUNS_FOLDER)))
folders = []
# All runs are used for training (both full and partial)
index_list = range(len(all_folders) - 1)
print("Number of runs: " + str(len(index_list)))
for index in index_list:
folders.append(all_folders[index])
print(folders)
df_train = pd.DataFrame(columns=['file', 'northing', 'easting'])
df_test = | pd.DataFrame(columns=['file', 'northing', 'easting']) | pandas.DataFrame |
import pandas as pd
from pandas import Period, offsets
from pandas.util import testing as tm
from pandas.tseries.frequencies import _period_code_map
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
result1 = val.asfreq('5t')
result2 = val.asfreq('t')
expected = Period('2007-12-31 23:59', freq='t')
self.assertEqual(result1.ordinal, expected.ordinal)
self.assertEqual(result1.freqstr, '5T')
self.assertEqual(result2.ordinal, expected.ordinal)
self.assertEqual(result2.freqstr, 'T')
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
self.assertEqual(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
self.assertEqual(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
self.assertEqual(ival_A.asfreq('M', 's'), ival_A_to_M_start)
self.assertEqual(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
self.assertEqual(ival_A.asfreq('W', 'S'), ival_A_to_W_start)
self.assertEqual(ival_A.asfreq('W', 'E'), ival_A_to_W_end)
self.assertEqual(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
self.assertEqual(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
self.assertEqual(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
self.assertEqual(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
self.assertEqual(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
self.assertEqual(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
self.assertEqual(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
self.assertEqual(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
self.assertEqual(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
self.assertEqual(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
self.assertEqual(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
self.assertEqual(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
self.assertEqual(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
self.assertEqual(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
self.assertEqual(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31, hour=23,
minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = | Period(freq='D', year=2006, month=4, day=30) | pandas.Period |
# Globals #
import re
import numpy as np
import pandas as pd
import dateutil.parser as dp
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import *
from itertools import islice
from scipy.stats import boxcox
from scipy.integrate import simps
from realtime_talib import Indicator
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
from pprint import pprint
from selenium import webdriver
import time
RANDOM_STATE = 42
# Sentiment Preprocessing
def remove_special_chars(headline_list):
"""
Returns list of headlines with all non-alphabetical characters removed.
"""
rm_spec_chars = [re.sub('[^ A-Za-z]+', "", headline) for headline in headline_list]
return rm_spec_chars
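# Illustrative sketch (sample headline, not part of the original module): the
# regex keeps only letters and spaces.
def _example_remove_special_chars():
    return remove_special_chars(["Bitcoin jumps 12%!"])  # -> ["Bitcoin jumps "]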
def tokenize(headline_list):
"""
Takes list of headlines as input and returns a list of lists of tokens.
"""
tokenized = []
for headline in headline_list:
tokens = word_tokenize(headline)
tokenized.append(tokens)
# print("tokenize")
# pprint(tokenized)
return tokenized
def remove_stop_words(tokenized_headline_list):
"""
Takes list of lists of tokens as input and removes all stop words.
"""
filtered_tokens = []
for token_list in tokenized_headline_list:
filtered_tokens.append([token for token in token_list if token not in set(stopwords.words('english'))])
# print("stop words")
# pprint(filtered_tokens)
return filtered_tokens
def stem(token_list_of_lists):
"""
Takes list of lists of tokens as input and stems every token.
Returns a list of lists of stems.
"""
stemmer = PorterStemmer()
stemmed = []
for token_list in token_list_of_lists:
# print(token_list)
stemmed.append([stemmer.stem(token) for token in token_list])
# print("stem")
# pprint(stemmed)
return stemmed
def make_bag_of_words(df, stemmed):
"""
Create bag of words model.
"""
print("\tCreating Bag of Words Model...")
very_pos = set()
slightly_pos = set()
neutral = set()
slightly_neg = set()
very_neg = set()
# Create sets that hold words in headlines categorized as "slightly_neg" or "slightly_pos" or etc
for stems, sentiment in zip(stemmed, df["Sentiment"].tolist()):
if sentiment == -2:
very_neg.update(stems)
elif sentiment == -1:
slightly_neg.update(stems)
elif sentiment == 0:
neutral.update(stems)
elif sentiment == 1:
slightly_pos.update(stems)
elif sentiment == 2:
very_pos.update(stems)
# Count number of words in each headline in each of the sets and encode it as a list of counts for each headline.
bag_count = []
for x in stemmed:
x = set(x)
bag_count.append(list((len(x & very_neg), len(x & slightly_neg), len(x & neutral), len(x & slightly_pos), len(x & very_pos))))
df["sentiment_class_count"] = bag_count
return df
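# Illustrative sketch (toy vocabulary, not part of the original module): each
# headline is scored by how many of its stems fall in each sentiment set.
def _example_bag_count():
    very_neg, very_pos = {"crash", "plunge"}, {"surge", "rally"}
    stems = {"bitcoin", "surge", "rally"}
    return len(stems & very_neg), len(stems & very_pos)  # -> (0, 2)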
def sentiment_preprocessing(df):
"""
Takes a dataframe, removes special characters, tokenizes
the headlines, removes stop-tokens, and stems the remaining tokens.
"""
specials_removed = remove_special_chars(df["Headline"].tolist())
tokenized = tokenize(specials_removed)
tokenized_filtered = remove_stop_words(tokenized)
stemmed = stem(tokenized_filtered)
return df, stemmed
def headlines_balanced_split(dataset, test_size):
"""
Randomly splits dataset into balanced training and test sets.
"""
print("\nSplitting headlines into *balanced* training and test sets...")
# pprint(list(dataset.values))
# pprint(dataset)
# Use sklearn.train_test_split to split all features into x_train and x_test,
# and all expected values into y_train and y_test numpy arrays
x_train, x_test, y_train, y_test = train_test_split(dataset.drop(["Sentiment", "Headline"], axis=1).values,
dataset["Sentiment"].values, test_size=test_size,
random_state=RANDOM_STATE)
x_train = [x[0] for x in x_train]
x_test = [x[0] for x in x_test]
# Combine x_train and y_train (numpy arrays) into a single dataframe, with column labels
train = pd.DataFrame(data=x_train, columns=["very_neg", "slightly_neg", "neutral", "slightly_pos", "very_pos"])
train["Sentiment"] = pd.Series(y_train)
# Do the same for x_test and y_test
test = pd.DataFrame(data=x_test, columns=["very_neg", "slightly_neg", "neutral", "slightly_pos", "very_pos"])
test["Sentiment"] = pd.Series(y_test)
train_prediction = train["Sentiment"].values
test_prediction = test["Sentiment"].values
train_trimmed = train.drop(["Sentiment"], axis=1).values
test_trimmed = test.drop(["Sentiment"], axis=1).values
return train_trimmed, test_trimmed, train_prediction, test_prediction
def split(dataset, test_size, balanced=True):
if balanced:
return headlines_balanced_split(dataset, test_size)
else:
# TODO: write imbalanced split function
return None
# Helpers #
def sliding_window(seq, n=2):
"""
Returns a sliding window (of width n) over data from the iterable. https://stackoverflow.com/a/6822773/8740440
"""
"s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ..."
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
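# Illustrative sketch (not part of the original module): sliding_window yields
# overlapping tuples of width n.
def _example_sliding_window():
    return list(sliding_window([1, 2, 3, 4], n=2))  # -> [(1, 2), (2, 3), (3, 4)]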
def integrate(avg_daily_sentiment, interval):
"""
Takes a list of average daily sentiment scores and returns a list of definite integral estimations calculated
with Simpson's method. Each integral interval is determined by the `interval` variable. Shows accumulated sentiment.
"""
# Split into sliding window list of lists
sentiment_windows = sliding_window(avg_daily_sentiment, interval)
integral_simpson_est = []
# https://stackoverflow.com/a/13323861/8740440
for x in sentiment_windows:
# Estimate area using composite Simpson's rule. dx indicates the spacing of the data on the x-axis.
integral_simpson_est.append(simps(x, dx=1))
dead_values = list([None] * interval)
dead_values.extend(integral_simpson_est)
dead_values.reverse()
return dead_values
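# Illustrative sketch (toy scores, not part of the original module): each window
# of `interval` consecutive scores is integrated with Simpson's rule, then the
# list is padded with None and reversed before being returned.
def _example_integrate():
    avg_daily_sentiment = [0.0, 1.0, 2.0, 3.0, 4.0]
    # windows (0,1,2), (1,2,3), (2,3,4) integrate to 2.0, 4.0 and 6.0, so the
    # call returns [6.0, 4.0, 2.0, None, None, None]
    return integrate(avg_daily_sentiment, interval=3)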
def random_undersampling(dataset):
"""
Randomly deleting rows that contain the majority class until the number
in the majority class is equal with the number in the minority class.
"""
minority_set = dataset[dataset.Trend == -1.0]
majority_set = dataset[dataset.Trend == 1.0]
# print(dataset.Trend.value_counts())
# If minority set larger than majority set, swap
if len(minority_set) > len(majority_set):
minority_set, majority_set = majority_set, minority_set
# Downsample majority class
majority_downsampled = resample(majority_set,
replace=False, # sample without replacement
n_samples=len(minority_set), # to match minority class
random_state=123) # reproducible results
# Combine minority class with downsampled majority class
return pd.concat([majority_downsampled, minority_set])
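# Illustrative sketch (synthetic frame, not part of the original module): the
# majority Trend class is randomly dropped until both classes have equal size.
def _example_random_undersampling():
    toy = pd.DataFrame({"feature": range(6),
                        "Trend": [1.0, 1.0, 1.0, 1.0, -1.0, -1.0]})
    balanced = random_undersampling(toy)
    return balanced["Trend"].value_counts()  # two rows of each class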
def get_popularity(headlines):
# TODO: Randomize user-agents OR figure out how to handle popups
if "Tweets" not in headlines.columns:
counts = []
driver = webdriver.Chrome()
for index, row in headlines.iterrows():
try:
driver.get(row["URL"])
time.sleep(3)
twitter_containers = driver.find_elements_by_xpath("//li[@class='twitter']")
count = twitter_containers[0].find_elements_by_xpath("//span[@class='count']")
if count[0].text == "":
counts.append(1)
else:
counts.append(int(count[0].text))
except:
counts.append(1) # QUESTION: Should it be None?
headlines["Tweets"] = (pd.Series(counts)).values
print(counts)
return headlines
def balanced_split(dataset, test_size):
"""
Randomly splits dataset into balanced training and test sets.
"""
print("\tSplitting data into *balanced* training and test sets")
# Use sklearn.train_test_split to split original dataset into x_train, y_train, x_test, y_test numpy arrays
x_train, x_test, y_train, y_test = train_test_split(dataset.drop(["Date", "Trend"], axis=1).values, dataset["Trend"].values, test_size=test_size, random_state=RANDOM_STATE)
# Combine x_train and y_train (numpy arrays) into a single dataframe, with column labels
train = pd.DataFrame(data=x_train, columns=dataset.columns[1:-1])
train["Trend"] = pd.Series(y_train)
# Do the same for x_test and y__test
test = pd.DataFrame(data=x_test, columns=dataset.columns[1:-1])
test["Trend"] = pd.Series(y_test)
# Apply random undersampling to both data frames
train_downsampled = random_undersampling(train)
test_downsampled = random_undersampling(test)
train_trend = train_downsampled["Trend"].values
test_trend = test_downsampled["Trend"].values
train_trimmed = train_downsampled.drop(["Trend"], axis=1).values
test_trimmed = test_downsampled.drop(["Trend"], axis=1).values
return train_trimmed, test_trimmed, train_trend, test_trend
def unbalanced_split(dataset, test_size):
"""
Randomly splits dataset into unbalanced training and test sets.
"""
print("\tSplitting data into *unbalanced* training and test sets")
dataset = dataset.drop("Date", axis=1)
output = train_test_split(dataset.drop("Trend", axis=1).values, dataset["Trend"].values, test_size=test_size, random_state=RANDOM_STATE)
return output
# Main #
def calculate_indicators(ohlcv):
"""
Extracts technical indicators from OHLCV data.
"""
print("\tCalculating technical indicators")
ohlcv = ohlcv.drop(["Volume (BTC)", "Weighted Price"], axis=1)
ohlcv.columns = ["Date", "Open", "High", "Low", "Close", "Volume"]
temp_ohlcv = ohlcv.copy()
# Converts ISO 8601 timestamps to UNIX
unix_times = [int((dp.parse(temp_ohlcv.iloc[index]["Date"])).strftime("%s")) for index in range(temp_ohlcv.shape[0])]
temp_ohlcv["Date"] = (pd.Series(unix_times)).values
# Converts column headers to lowercase and sorts rows in chronological order
temp_ohlcv.columns = ["date", "open", "high", "low", "close", "volume"]
temp_ohlcv = temp_ohlcv.iloc[::-1]
# Rate of Change Ratio
rocr3 = ((Indicator(temp_ohlcv, "ROCR", 3)).getHistorical())[::-1]
rocr6 = ((Indicator(temp_ohlcv, "ROCR", 6)).getHistorical())[::-1]
# Average True Range
atr = ((Indicator(temp_ohlcv, "ATR", 14)).getHistorical())[::-1]
# On-Balance Volume
obv = ((Indicator(temp_ohlcv, "OBV")).getHistorical())[::-1]
# Triple Exponential Moving Average
trix = ((Indicator(temp_ohlcv, "TRIX", 20)).getHistorical())[::-1]
# Momentum
mom1 = ((Indicator(temp_ohlcv, "MOM", 1)).getHistorical())[::-1]
mom3 = ((Indicator(temp_ohlcv, "MOM", 3)).getHistorical())[::-1]
# Average Directional Index
adx14 = ((Indicator(temp_ohlcv, "ADX", 14)).getHistorical())[::-1]
adx20 = ((Indicator(temp_ohlcv, "ADX", 20)).getHistorical())[::-1]
# Williams %R
willr = ((Indicator(temp_ohlcv, "WILLR", 14)).getHistorical())[::-1]
# Relative Strength Index
rsi6 = ((Indicator(temp_ohlcv, "RSI", 6)).getHistorical())[::-1]
rsi12 = ((Indicator(temp_ohlcv, "RSI", 12)).getHistorical())[::-1]
# Moving Average Convergence Divergence
macd, macd_signal, macd_hist = (Indicator(temp_ohlcv, "MACD", 12, 26, 9)).getHistorical()
macd, macd_signal, macd_hist = macd[::-1], macd_signal[::-1], macd_hist[::-1]
# Exponential Moving Average
ema6 = ((Indicator(temp_ohlcv, "MA", 6, 1)).getHistorical())[::-1]
ema12 = ((Indicator(temp_ohlcv, "MA", 12, 1)).getHistorical())[::-1]
# Append indicators to the input datasets
min_length = min(len(mom1), len(mom3), len(adx14), len(adx20), len(willr), len(rsi6), len(rsi12), len(macd), len(macd_signal), len(macd_hist), len(ema6), len(ema12), len(rocr3), len(rocr6), len(atr), len(obv), len(trix))
ohlcv = ohlcv[:min_length].drop(["Open", "High", "Low"], axis=1)
ohlcv["MOM (1)"], ohlcv["MOM (3)"], ohlcv["ADX (14)"] = (pd.Series(mom1[:min_length])).values, (pd.Series(mom3[:min_length])).values, (pd.Series(adx14[:min_length])).values
ohlcv["ADX (20)"], ohlcv["WILLR"], ohlcv["RSI (6)"] = (pd.Series(adx20[:min_length])).values, (pd.Series(willr[:min_length])).values, (pd.Series(rsi6[:min_length])).values
ohlcv["RSI (12)"], ohlcv["MACD"], ohlcv["MACD (Signal)"] = (pd.Series(rsi12[:min_length])).values, (pd.Series(macd[:min_length])).values, (pd.Series(macd_signal[:min_length])).values
ohlcv["MACD (Historical)"], ohlcv["EMA (6)"], ohlcv["EMA (12)"] = ( | pd.Series(macd_hist[:min_length]) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 16 13:07:06 2017
@author: Steff
"""
from sklearn import preprocessing
import numpy as np
import pandas as pd
import csv
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import average_precision_score
from sklearn.metrics import precision_recall_fscore_support
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn import tree
from sklearn.neighbors import NearestCentroid
from sklearn.svm import LinearSVC
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.utils import resample
import matplotlib.pyplot as plt
class Classifier:
# data = pd.DataFrame containing the attributes
# truth = pd.DataFrame containing the class
# truth_arr = array of truth values
def __init__(self,data,c,upsample=False):
# set data
self.data = data
if upsample:
self.upsample(c)
# set truth array
try:
truth = np.array([x.decode('ascii') for x in self.data[c].values]) # "inline" for loop with []
except AttributeError:
truth = np.array(self.data[c].values)
pass
self.truth = pd.DataFrame(truth,columns=[c],index=data.index)
self.dropColumns([c])
self.extractTruthArray()
"""
Methods for Preprocessing
"""
# drop all given columns
def dropColumns(self,columns):
"""
Drop given columns from the classifier DataFrame
:param columns: array of column names
"""
self.data = self.data.drop(columns,axis=1)
# drop all columns with given prefix
def dropColumnByPrefix(self,prefix):
#print(self.data.filter(regex=prefix))
self.data.drop(list(self.data.filter(regex=prefix,axis=1)), axis=1, inplace=True)
# drop all rows containing missing values
def dropMissing(self):
missing_indices = np.array([],dtype=int)
for c in self.data.columns:
zero = None
if type(self.data[c][0]) == "<class 'bytes'>":
zero = b'?'
if zero is not None:
try:
missing_indices = np.append(missing_indices,self.data[self.data[c]==zero].index.values)
except TypeError:
print("TypeError",c,self.data[c][0],type(self.data[c][0]),isinstance(self.data[c][0],str),zero)
pass
missing_indices = np.unique(missing_indices)
self.data = self.data.drop(missing_indices)
self.truth = self.truth.drop(missing_indices)
self.extractTruthArray()
#Upsample the minority class
def upsample(self, c):
# Separate majority and minority classes
df_majority = self.data[self.data[c] == "yes"]
df_minority = self.data[self.data[c] == "no"]
# Upsample minority class
df_minority_upsampled = resample(df_minority,
replace=True, # sample with replacement
n_samples=len(df_majority), # to match majority class
random_state=42) # reproducible results
# Combine majority class with upsampled minority class
self.data = pd.concat([df_majority, df_minority_upsampled])
def upsampleTrainData(self):
wholeTrainData = pd.merge(self.data_train, pd.DataFrame(self.target_train), left_index=True, right_index=True)
df_majority = wholeTrainData[wholeTrainData[0] == "yes"]
df_minority = wholeTrainData[wholeTrainData[0] == "no"]
# Upsample minority class
df_minority_upsampled = resample(df_minority,
replace=True, # sample with replacement
n_samples=len(df_majority), # to match majority class
random_state=42) # reproducible results
# Combine majority class with upsampled minority class
wholeTrainData = pd.concat([df_majority, df_minority_upsampled])
self.target_train = wholeTrainData[0].values
wholeTrainData.drop([0],axis=1, inplace=True)
self.data_train = wholeTrainData
def downsampleTrainData(self):
wholeTrainData = pd.merge(self.data_train, pd.DataFrame(self.target_train), left_index=True, right_index=True)
df_majority = wholeTrainData[wholeTrainData[0] == "yes"]
df_minority = wholeTrainData[wholeTrainData[0] == "no"]
# Upsample minority class
df_majority_downsampled = resample(df_majority,
replace=True, # sample with replacement
n_samples=len(df_minority), # to match majority class
random_state=42) # reproducible results
# Combine majority class with upsampled minority class
wholeTrainData = pd.concat([df_minority, df_majority_downsampled])
self.target_train = wholeTrainData[0].values
wholeTrainData.drop([0],axis=1, inplace=True)
self.data_train = wholeTrainData
# HotEncode given columns
def hotEncode(self,columns):
data_encoded = pd.get_dummies(self.data[columns])
self.dropColumns(columns)
self.data = pd.concat([self.data, data_encoded],axis=1)
# LabelEncode given columns
def labelEncode(self,columns):
data_encoded = self.data[columns].apply(preprocessing.LabelEncoder().fit_transform)
self.dropColumns(columns)
self.data = pd.concat([self.data, data_encoded],axis=1)
# MinMaxScale given columns
def scale(self,columns):
scaler = preprocessing.MinMaxScaler()
data_preprocessed = pd.DataFrame(
scaler.fit_transform(self.data[columns]),
columns=columns,
index=self.data.index
)
self.dropColumns(columns)
self.data = | pd.concat([self.data, data_preprocessed],axis=1) | pandas.concat |
"""
For each rat
time unit value shuffle rat dataset when num_trials
0 200 0 -0.038855 0 DRRD 7 wide_smoothed init 20
1 300 0 -0.084481 0 DRRD 7 wide_smoothed init 20
2 400 0 0.060537 0 DRRD 7 wide_smoothed init 20
3 500 0 0.021759 0 DRRD 7 wide_smoothed init 20
4 600 0 -0.336057 0 DRRD 7 wide_smoothed init 20
...
173 500 17 -0.057553 10 DRRD 10 narrow_smoothed end 70
174 600 17 -0.240128 10 DRRD 10 narrow_smoothed end 70
175 700 17 -0.194336 10 DRRD 10 narrow_smoothed end 70
176 800 17 0.087844 10 DRRD 10 narrow_smoothed end 70
"""
# TODO: threshold based on the p-values
## Permutation test: 1000 bootstraps
## Non-parametric weight distribution
## To measure performance, do NOT permute the test set.
# Use Gallistel
# D prime
# Saliency maps
## Keras - check whether it exists for logistic regression
## permutation tests - like weights
import pandas as pd
import numpy as np
import sys
sys.path.append('.')
from spikelearn.models import shuffle_val_predict
from spikelearn.data import io
from spikelearn.data.selection import select, to_feature_array
from sklearn.linear_model import LogisticRegression
from itertools import product, chain
DRRD_RATS = ['DRRD 7','DRRD 8','DRRD 9','DRRD 10']
DATASETS = ['medium_smoothed']#['wide_smoothed', 'medium_smoothed', 'narrow_smoothed']
WHENS = ['init', 'end']
NUM_TRIALS = np.arange(10,100,5)
LOGCS = np.linspace(-1.5, 4, 20)
ANALYSIS_NTRIALS = product(DRRD_RATS, DATASETS, WHENS, NUM_TRIALS, [0])
ANALYSIS_REGUL = product(DRRD_RATS, DATASETS, WHENS, [50], LOGCS)
ANALYSIS = chain(ANALYSIS_NTRIALS, ANALYSIS_REGUL)
results = pd.DataFrame()
preds = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 11 10:54:17 2018
@author: ericlberlow
This script makes frequency distribution plots of
* keyword tags
* concept tags
* keyword clusters
for both networks. It uses bokeh for rendering interactive charts.
"""
#%%
import sys
#reload(sys)
#sys.setdefaultencoding('utf-8')
sys.path.append("../Tag2Network/tag2network")
import pandas as pd
import numpy as np
from collections import Counter, OrderedDict
import holoviews as hv
from bokeh.io import output_file, show
#from bokeh.io import save
from bokeh.plotting import reset_output
hv.extension('bokeh','matplotlib')
Colors = ['#2aadbf','#f06b51', '#fbb44d', '#616161','#334e7d','#1a7480','#539280'] # blue, orange, yellow, gray, dark blue, ocean green, light green
# Define input and output file paths
dictpath = "."
datapath = "Results/"
kwdname_IDF = (datapath + "scifi_network_IDF_201810.xlsx")
kwdname_noIDF = (datapath + "scifi_network_noIDF_201810.xlsx")
#%% ####################
print('reading nodes files')
ndfIDF = pd.read_excel(kwdname_IDF, sheet_name='Nodes') # read nodes file
ndfNoIDF = pd.read_excel(kwdname_noIDF, sheet_name='Nodes') # read nodes file
#%% ####################
# function to create df with tag counts and percents sorted by most common for histogram,
def buildTagHistDf (df, col):
totBooks = len(df)
tagDict = {}
tagLists = df[col].str.split('|')
tagLists = tagLists.apply(lambda x: [tag.strip(' ') for tag in x]) # strip empty spaces for each item in each list
tagHist = OrderedDict(Counter([t for tags in tagLists for t in tags if t != '']).most_common()) # excludes rows with no data
tagDict[col] = list(tagHist.keys())
tagDict['count'] = list(tagHist.values())
tagdf = pd.DataFrame(tagDict)
tagdf['pct'] = tagdf['count'].apply(lambda x: np.round((100*(x/totBooks)),2))
return tagdf
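# Illustrative (hypothetical) input/output for buildTagHistDf, assuming a
# 'keywords' column delimited by '|':
#   buildTagHistDf(pd.DataFrame({'keywords': ['space|alien', 'space']}), 'keywords')
# returns a DataFrame sorted by count:
#   keywords  count    pct
#   space         2  100.0
#   alien         1   50.0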
#%%
## create files for keyword distribution, concepts distribution, n_keywordsPerBooK counts ##
print('make dataframes of keyword distribution, concepts distribution, n keywords per book counts')
## keywords
tags_df = buildTagHistDf(ndfIDF, 'keywords')
## concepts
concepts_df = buildTagHistDf(ndfIDF, 'concepts')
# n keywords per book count
vals, counts = np.unique(ndfIDF['n_keywords'], return_counts=True)
tagcounts = | pd.DataFrame({'n_keywordsPerBook':vals, 'count':counts}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals # NOQA
import plottool as pt
import utool as ut
from ibeis.algo.verif import vsone
from ibeis.scripts._thesis_helpers import DBInputs
from ibeis.scripts.thesis import Sampler # NOQA
from ibeis.scripts._thesis_helpers import Tabular, upper_one, ave_str
from ibeis.scripts._thesis_helpers import dbname_to_species_nice
from ibeis.scripts._thesis_helpers import TMP_RC, W, H, DPI
from ibeis.algo.graph.state import POSTV, NEGTV, INCMP, UNREV # NOQA
import numpy as np # NOQA
import pandas as pd
import ubelt as ub # NOQA
import itertools as it
import matplotlib as mpl
from os.path import basename, join, splitext, exists # NOQA
import ibeis.constants as const
import vtool as vt
from ibeis.algo.graph.state import POSTV, NEGTV, INCMP, UNREV, UNKWN # NOQA
(print, rrr, profile) = ut.inject2(__name__)
CLF = 'VAMP'
LNBNN = 'LNBNN'
def turk_pz():
import ibeis
ibs = ibeis.opendb('GZ_Master1')
infr = ibeis.AnnotInference(ibs, aids='all')
infr.reset_feedback('staging', apply=True)
infr.relabel_using_reviews(rectify=True)
# infr.apply_nondynamic_update()
print(ut.repr4(infr.status()))
infr.ibeis_delta_info()
infr.match_state_delta()
infr.get_ibeis_name_delta()
infr.relabel_using_reviews(rectify=True)
infr.write_ibeis_annotmatch_feedback()
infr.write_ibeis_name_assignment()
pass
@ut.reloadable_class
class GraphExpt(DBInputs):
"""
TODO:
- [ ] Experimental analysis of duration of each phase and state of
graph.
- [ ] Experimental analysis of phase 3, including how far we can get
with automatic decision making and do we discover new merges? If
there are potential merges, can we run phase iii with exactly the
same ordering as before: ordering by probability for automatically
decidable and then by positive probability for others. This should
work for phase 3 and therefore allow a clean combination of the
three phases and our termination criteria. I just thought of this
so don't really have it written cleanly above.
- [ ] Experimental analysis of choice of automatic decision thresholds.
by lowering the threshold we increase the risk of mistakes. Each
mistake costs some number of manual reviews (perhaps 2-3), but if
the frequency of errors is low then we could be saving ourselves a
lot of manual reviews.
\item OTHER SPECIES
CommandLine:
python -m ibeis GraphExpt.measure all PZ_MTEST
Ignore:
>>> from ibeis.scripts.postdoc import *
>>> self = GraphExpt('PZ_MTEST')
>>> self._precollect()
>>> self._setup()
"""
base_dpath = ut.truepath('~/Desktop/graph_expt')
def _precollect(self):
if self.ibs is None:
_GraphExpt = ut.fix_super_reload(GraphExpt, self)
super(_GraphExpt, self)._precollect()
# Split data into a training and testing test
ibs = self.ibs
annots = ibs.annots(self.aids_pool)
names = list(annots.group_items(annots.nids).values())
ut.shuffle(names, rng=321)
train_names, test_names = names[0::2], names[1::2]
train_aids, test_aids = map(ut.flatten, (train_names, test_names))
self.test_train = train_aids, test_aids
params = {}
self.pblm = vsone.OneVsOneProblem.from_aids(
ibs, train_aids, **params)
# ut.get_nonconflicting_path(dpath, suffix='_old')
self.const_dials = {
# 'oracle_accuracy' : (0.98, 1.0),
# 'oracle_accuracy' : (0.98, .98),
'oracle_accuracy' : (0.99, .99),
'k_redun' : 2,
'max_outer_loops' : np.inf,
# 'max_outer_loops' : 1,
}
config = ut.dict_union(self.const_dials)
cfg_prefix = '{}_{}'.format(len(test_aids), len(train_aids))
self._setup_links(cfg_prefix, config)
def _setup(self):
"""
python -m ibeis GraphExpt._setup
Example:
>>> from ibeis.scripts.postdoc import *
>>> #self = GraphExpt('GZ_Master1')
>>> self = GraphExpt('PZ_MTEST')
>>> self = GraphExpt('PZ_Master1')
>>> self._setup()
"""
self._precollect()
train_aids, test_aids = self.test_train
task_key = 'match_state'
pblm = self.pblm
data_key = pblm.default_data_key
clf_key = pblm.default_clf_key
pblm.eval_data_keys = [data_key]
pblm.setup(with_simple=False)
pblm.learn_evaluation_classifiers()
res = pblm.task_combo_res[task_key][clf_key][data_key]
# pblm.report_evaluation()
# TODO: need more principled way of selecting thresholds
# graph_thresh = res.get_pos_threshes('fpr', 0.01)
graph_thresh = res.get_pos_threshes('fpr', 0.001)
# rankclf_thresh = res.get_pos_threshes(fpr=0.01)
# Load or create the deploy classifiers
clf_dpath = ut.ensuredir((self.dpath, 'clf'))
classifiers = pblm.ensure_deploy_classifiers(dpath=clf_dpath)
sim_params = {
'test_aids': test_aids,
'train_aids': train_aids,
'classifiers': classifiers,
'graph_thresh': graph_thresh,
# 'rankclf_thresh': rankclf_thresh,
'const_dials': self.const_dials,
}
self.pblm = pblm
self.sim_params = sim_params
return sim_params
def measure_all(self):
self.measure_graphsim()
@profile
def measure_graphsim(self):
"""
CommandLine:
python -m ibeis GraphExpt.measure graphsim GZ_Master1
1
Ignore:
>>> from ibeis.scripts.postdoc import *
>>> #self = GraphExpt('PZ_MTEST')
>>> #self = GraphExpt('GZ_Master1')
>>> self = GraphExpt.measure('graphsim', 'PZ_Master1')
>>> self = GraphExpt.measure('graphsim', 'GZ_Master1')
>>> self = GraphExpt.measure('graphsim', 'PZ_MTEST')
"""
import ibeis
self.ensure_setup()
ibs = self.ibs
sim_params = self.sim_params
classifiers = sim_params['classifiers']
test_aids = sim_params['test_aids']
graph_thresh = sim_params['graph_thresh']
const_dials = sim_params['const_dials']
sim_results = {}
verbose = 1
# ----------
# Graph test
dials1 = ut.dict_union(const_dials, {
'name' : 'graph',
'enable_inference' : True,
'match_state_thresh' : graph_thresh,
})
infr1 = ibeis.AnnotInference(ibs=ibs, aids=test_aids, autoinit=True,
verbose=verbose)
infr1.enable_auto_prioritize_nonpos = True
infr1.params['refresh.window'] = 20
infr1.params['refresh.thresh'] = 0.052
infr1.params['refresh.patience'] = 72
infr1.params['redun.enforce_pos'] = True
infr1.params['redun.enforce_neg'] = True
infr1.init_simulation(classifiers=classifiers, **dials1)
infr1.init_test_mode()
infr1.reset(state='empty')
# if False:
# infr = infr1
# infr.init_refresh()
# n_prioritized = infr.refresh_candidate_edges()
# gen = infr.lnbnn_priority_gen(use_refresh=True)
# next(gen)
# edge = (25, 118)
list(infr1.main_gen())
# infr1.main_loop()
sim_results['graph'] = self._collect_sim_results(infr1, dials1)
# ------------
# Dump experiment output to disk
expt_name = 'graphsim'
self.expt_results[expt_name] = sim_results
ut.ensuredir(self.dpath)
ut.save_data(join(self.dpath, expt_name + '.pkl'), sim_results)
def _collect_sim_results(self, infr, dials):
pred_confusion = pd.DataFrame(infr.test_state['confusion'])
pred_confusion.index.name = 'real'
pred_confusion.columns.name = 'pred'
print('Edge confusion')
print(pred_confusion)
expt_data = {
'real_ccs': list(infr.nid_to_gt_cc.values()),
'pred_ccs': list(infr.pos_graph.connected_components()),
'graph': infr.graph.copy(),
'dials': dials,
'refresh_thresh': infr.refresh._prob_any_remain_thresh,
'metrics': infr.metrics_list,
}
return expt_data
def draw_graphsim(self):
"""
CommandLine:
python -m ibeis GraphExpt.measure graphsim GZ_Master1
python -m ibeis GraphExpt.draw graphsim GZ_Master1 --diskshow
python -m ibeis GraphExpt.draw graphsim PZ_MTEST --diskshow
python -m ibeis GraphExpt.draw graphsim GZ_Master1 --diskshow
python -m ibeis GraphExpt.draw graphsim PZ_Master1 --diskshow
Ignore:
>>> from ibeis.scripts.postdoc import *
>>> self = GraphExpt('GZ_Master1')
>>> self = GraphExpt('PZ_MTEST')
"""
sim_results = self.ensure_results('graphsim')
metric_nice = {
'n_errors': '# errors',
'n_manual': '# manual reviews',
'frac_mistake_aids': 'fraction error annots',
'merge_remain': 'fraction of merges remain',
}
# keys = ['ranking', 'rank+clf', 'graph']
# keycols = ['red', 'orange', 'b']
keys = ['graph']
keycols = ['b']
colors = ut.dzip(keys, keycols)
dfs = {k: pd.DataFrame(v['metrics'])
for k, v in sim_results.items()}
n_aids = sim_results['graph']['graph'].number_of_nodes()
df = dfs['graph']
df['frac_mistake_aids'] = df.n_mistake_aids / n_aids
# mdf = pd.concat(dfs.values(), keys=dfs.keys())
import xarray as xr
panel = xr.concat(
[xr.DataArray(df, dims=('ts', 'metric'))
for df in dfs.values()],
dim=pd.Index(list(dfs.keys()), name='key')
)
xmax = panel.sel(metric='n_manual').values.max()
xpad = (1.01 * xmax) - xmax
pnum_ = pt.make_pnum_nextgen(nSubplots=2)
mpl.rcParams.update(TMP_RC)
fnum = 1
pt.figure(fnum=fnum, pnum=pnum_())
ax = pt.gca()
xkey, ykey = 'n_manual', 'merge_remain'
datas = panel.sel(metric=[xkey, ykey])
for key in keys:
ax.plot(*datas.sel(key=key).values.T, label=key, color=colors[key])
ax.set_ylim(0, 1)
ax.set_xlim(-xpad, xmax + xpad)
ax.set_xlabel(metric_nice[xkey])
ax.set_ylabel(metric_nice[ykey])
ax.legend()
pt.figure(fnum=fnum, pnum=pnum_())
ax = pt.gca()
xkey, ykey = 'n_manual', 'frac_mistake_aids'
datas = panel.sel(metric=[xkey, ykey])
for key in keys:
ax.plot(*datas.sel(key=key).values.T, label=key, color=colors[key])
ax.set_ylim(0, datas.T[1].max() * 1.01)
ax.set_xlim(-xpad, xmax + xpad)
ax.set_xlabel(metric_nice[xkey])
ax.set_ylabel(metric_nice[ykey])
ax.legend()
fig = pt.gcf() # NOQA
fig.set_size_inches([W, H * .75])
pt.adjust_subplots(wspace=.25, fig=fig)
fpath = join(self.dpath, 'simulation.png')
vt.imwrite(fpath, pt.render_figure_to_image(fig, dpi=DPI))
if ut.get_argflag('--diskshow'):
ut.startfile(fpath)
def draw_graphsim2(self):
"""
CommandLine:
python -m ibeis GraphExpt.draw graphsim2 --db PZ_MTEST --diskshow
python -m ibeis GraphExpt.draw graphsim2 GZ_Master1 --diskshow
python -m ibeis GraphExpt.draw graphsim2 PZ_Master1 --diskshow
Example:
>>> from ibeis.scripts.thesis import *
>>> dbname = ut.get_argval('--db', default='GZ_Master1')
>>> self = GraphExpt(dbname)
>>> self.draw_graphsim2()
>>> ut.show_if_requested()
"""
mpl.rcParams.update(TMP_RC)
sim_results = self.ensure_results('graphsim')
expt_data = sim_results['graph']
metrics_df = pd.DataFrame.from_dict(expt_data['metrics'])
# n_aids = sim_results['graph']['graph'].number_of_nodes()
# metrics_df['frac_mistake_aids'] = metrics_df.n_mistake_aids / n_aids
fnum = 1 # NOQA
default_flags = {
'phase': True,
'pred': False,
'user': True,
'real': True,
'error': 0,
'recover': 1,
}
def plot_intervals(flags, color=None, low=0, high=1):
ax = pt.gca()
idxs = np.where(flags)[0]
ranges = ut.group_consecutives(idxs)
bounds = [(min(a), max(a)) for a in ranges if len(a) > 0]
xdata_ = xdata.values
xs, ys = [xdata_[0]], [low]
for a, b in bounds:
x1, x2 = xdata_[a], xdata_[b]
# if x1 == x2:
x1 -= .5
x2 += .5
xs.extend([x1, x1, x2, x2])
ys.extend([low, high, high, low])
xs.append(xdata_[-1])
ys.append(low)
ax.fill_between(xs, ys, low, alpha=.6, color=color)
def overlay_actions(ymax=1, kw=None):
"""
Draws indicators that detail the algorithm state at given
timestamps.
"""
phase = metrics_df['phase'].map(
lambda x: x.split('_')[0])
is_correct = metrics_df['test_action'].map(
lambda x: x.startswith('correct')).values
recovering = metrics_df['recovering'].values
is_auto = metrics_df['user_id'].map(
lambda x: x.startswith('algo:')).values
ppos = metrics_df['pred_decision'].map(
lambda x: x == POSTV).values
rpos = metrics_df['true_decision'].map(
lambda x: x == POSTV).values
# ymax = max(metrics_df['n_errors'])
if kw is None:
kw = default_flags
num = sum(kw.values())
steps = np.linspace(0, 1, num + 1) * ymax
i = -1
def stacked_interval(data, color, i):
plot_intervals(data, color, low=steps[i], high=steps[i + 1])
if kw.get('user', False):
i += 1
pt.absolute_text((.2, steps[i:i + 2].mean()),
'user(algo=gold,manual=blue)')
stacked_interval(is_auto, 'gold', i)
stacked_interval(~is_auto, 'blue', i)
if kw.get('pred', False):
i += 1
pt.absolute_text((.2, steps[i:i + 2].mean()), 'pred_pos')
stacked_interval(ppos, 'aqua', low=steps[i], high=steps[i + 1])
# stacked_interval(~ppos, 'salmon', i)
if kw.get('real', False):
i += 1
pt.absolute_text((.2, steps[i:i + 2].mean()), 'real_merge')
stacked_interval(rpos, 'lime', i)
# stacked_interval(~ppos, 'salmon', i)
if kw.get('error', False):
i += 1
pt.absolute_text((.2, steps[i:i + 2].mean()), 'is_error')
# stacked_interval(is_correct, 'blue', low=steps[i], high=steps[i + 1])
stacked_interval(~is_correct, 'red', i)
if kw.get('recover', False):
i += 1
pt.absolute_text((.2, steps[i:i + 2].mean()), 'is_recovering')
stacked_interval(recovering, 'orange', i)
if kw.get('phase', False):
i += 1
pt.absolute_text((.2, steps[i:i + 2].mean()),
'phase(1=yellow, 2=aqua, 3=pink)')
stacked_interval(phase == 'ranking', 'yellow', i)
stacked_interval(phase == 'posredun', 'aqua', i)
stacked_interval(phase == 'negredun', 'pink', i)
# stacked_interval(phase == 'ranking', 'red', i)
# stacked_interval(phase == 'posredun', 'green', i)
# stacked_interval(phase == 'negredun', 'blue', i)
def accuracy_plot(xdata, xlabel):
ydatas = ut.odict([
('Graph', metrics_df['merge_remain']),
])
pt.multi_plot(
xdata, ydatas, marker='', markersize=1,
xlabel=xlabel, ylabel='fraction of merge remaining',
ymin=0, rcParams=TMP_RC,
use_legend=True, fnum=1, pnum=pnum_(),
)
def error_plot(xdata, xlabel):
# ykeys = ['n_errors']
ykeys = ['frac_mistake_aids']
pt.multi_plot(
xdata, metrics_df[ykeys].values.T,
xlabel=xlabel,
ylabel='fraction error annots',
marker='', markersize=1, ymin=0, rcParams=TMP_RC,
fnum=1, pnum=pnum_(),
use_legend=False,
)
def refresh_plot(xdata, xlabel):
pt.multi_plot(
xdata, [metrics_df['pprob_any']],
label_list=['P(C=1)'],
xlabel=xlabel, ylabel='refresh criteria',
marker='', ymin=0, ymax=1, rcParams=TMP_RC,
fnum=1, pnum=pnum_(),
use_legend=False,
)
ax = pt.gca()
thresh = expt_data['refresh_thresh']
ax.plot([min(xdata), max(xdata)], [thresh, thresh], '-g',
label='refresh thresh')
ax.legend()
def error_breakdown_plot(xdata, xlabel):
ykeys = ['n_fn', 'n_fp']
pt.multi_plot(
xdata, metrics_df[ykeys].values.T,
label_list=ykeys,
xlabel=xlabel, ylabel='# of errors',
marker='x', markersize=1, ymin=0, rcParams=TMP_RC,
ymax=max(metrics_df['n_errors']),
fnum=1, pnum=pnum_(),
use_legend=True,
)
def neg_redun_plot(xdata, xlabel):
n_pred = len(sim_results['graph']['pred_ccs'])
z = (n_pred * (n_pred - 1)) / 2
metrics_df['p_neg_redun'] = metrics_df['n_neg_redun'] / z
metrics_df['p_neg_redun1'] = metrics_df['n_neg_redun1'] / z
ykeys = ['p_neg_redun', 'p_neg_redun1']
pt.multi_plot(
xdata, metrics_df[ykeys].values.T,
label_list=ykeys,
xlabel=xlabel, ylabel='% neg-redun-meta-edges',
marker='x', markersize=1, ymin=0, rcParams=TMP_RC,
ymax=max(metrics_df['p_neg_redun1']),
fnum=1, pnum=pnum_(),
use_legend=True,
)
pnum_ = pt.make_pnum_nextgen(nRows=2, nSubplots=6)
# --- ROW 1 ---
xdata = metrics_df['n_decision']
xlabel = '# decisions'
accuracy_plot(xdata, xlabel)
# overlay_actions(1)
error_plot(xdata, xlabel)
overlay_actions(max(metrics_df['frac_mistake_aids']))
# overlay_actions(max(metrics_df['n_errors']))
# refresh_plot(xdata, xlabel)
# overlay_actions(1, {'phase': True})
# error_breakdown_plot(xdata, xlabel)
neg_redun_plot(xdata, xlabel)
# --- ROW 2 ---
xdata = metrics_df['n_manual']
xlabel = '# manual reviews'
accuracy_plot(xdata, xlabel)
# overlay_actions(1)
error_plot(xdata, xlabel)
overlay_actions(max(metrics_df['frac_mistake_aids']))
# overlay_actions(max(metrics_df['n_errors']))
# refresh_plot(xdata, xlabel)
# overlay_actions(1, {'phase': True})
# error_breakdown_plot(xdata, xlabel)
neg_redun_plot(xdata, xlabel)
# fpath = join(self.dpath, expt_name + '2' + '.png')
# fig = pt.gcf() # NOQA
# fig.set_size_inches([W * 1.5, H * 1.1])
# vt.imwrite(fpath, pt.render_figure_to_image(fig, dpi=DPI))
# if ut.get_argflag('--diskshow'):
# ut.startfile(fpath)
# fig.save_fig
# if 1:
# pt.figure(fnum=fnum, pnum=(2, 2, 4))
# overlay_actions(ymax=1)
pt.set_figtitle(self.dbname)
fig = pt.gcf() # NOQA
fig.set_size_inches([W * 2, H * 2.5])
fig.suptitle(self.dbname)
pt.adjust_subplots(hspace=.25, wspace=.25, fig=fig)
fpath = join(self.dpath, 'graphsim2.png')
fig.savefig(fpath, dpi=DPI)
# vt.imwrite(fpath, pt.render_figure_to_image(fig, dpi=DPI))
if ut.get_argflag('--diskshow'):
ut.startfile(fpath)
def draw_match_states():
import ibeis
infr = ibeis.AnnotInference('PZ_Master1', 'all')
if infr.ibs.dbname == 'PZ_Master1':
# [UUID('0cb1ebf5-2a4f-4b80-b172-1b449b8370cf'),
# UUID('cd644b73-7978-4a5f-b570-09bb631daa75')]
chosen = {
POSTV: (17095, 17225),
NEGTV: (3966, 5080),
INCMP: (3197, 8455),
}
else:
infr.reset_feedback('staging')
chosen = {
POSTV: list(infr.pos_graph.edges())[0],
NEGTV: list(infr.neg_graph.edges())[0],
INCMP: list(infr.incmp_graph.edges())[0],
}
import plottool as pt
import vtool as vt
for key, edge in chosen.items():
match = infr._make_matches_from([edge], config={
'match_config': {'ratio_thresh': .7}})[0]
with pt.RenderingContext(dpi=300) as ctx:
match.show(heatmask=True, show_ell=False, show_ori=False,
show_lines=False)
vt.imwrite('matchstate_' + key + '.jpg', ctx.image)
def entropy_potential(infr, u, v, decision):
"""
Returns the number of edges this edge would invalidate
from ibeis.algo.graph import demo
infr = demo.demodata_infr(pcc_sizes=[5, 2, 4, 2, 2, 1, 1, 1])
infr.refresh_candidate_edges()
infr.params['redun.neg'] = 1
infr.params['redun.pos'] = 1
infr.apply_nondynamic_update()
ut.qtensure()
infr.show(show_cand=True, groupby='name_label')
u, v = 1, 7
decision = 'positive'
"""
nid1, nid2 = infr.pos_graph.node_labels(u, v)
# Cases for K=1
if decision == 'positive' and nid1 == nid2:
# The actual reduction is the number previously needed to make the cc
# k-edge-connected vs. how many it needs now.
# In the same CC does nothing
# (unless k > 1, in which case check edge connectivity)
return 0
elif decision == 'positive' and nid1 != nid2:
# Between two PCCs reduces the number of PCCs by one
n_ccs = infr.pos_graph.number_of_components()
# Find the needed negative redundancy when the PCCs are apart
if infr.neg_redun_metagraph.has_node(nid1):
neg_redun_set1 = set(infr.neg_redun_metagraph.neighbors(nid1))
else:
neg_redun_set1 = set()
if infr.neg_redun_metagraph.has_node(nid2):
neg_redun_set2 = set(infr.neg_redun_metagraph.neighbors(nid2))
else:
neg_redun_set2 = set()
# The number of negative edges needed before we place this edge
# is the number of PCCs that each PCC doesn't have a negative edge to
# yet
n_neg_need1 = (n_ccs - len(neg_redun_set1) - 1)
n_neg_need2 = (n_ccs - len(neg_redun_set2) - 1)
n_neg_need_before = n_neg_need1 + n_neg_need2
# After we join them we take the union of their negative redundancy
# (really we should check if it changes after)
# and this is now the new number of negative edges that would be needed
neg_redun_after = neg_redun_set1.union(neg_redun_set2) - {nid1, nid2}
n_neg_need_after = (n_ccs - 2) - len(neg_redun_after)
neg_entropy = n_neg_need_before - n_neg_need_after # NOQA
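# Worked example of the bookkeeping above (hypothetical numbers): with
# n_ccs = 5, neg_redun_set1 = {a, b} and neg_redun_set2 = {a}, we get
# n_neg_need1 = 5 - 2 - 1 = 2 and n_neg_need2 = 5 - 1 - 1 = 3, so
# n_neg_need_before = 5; the union {a, b} then gives
# n_neg_need_after = (5 - 2) - 2 = 1, hence neg_entropy = 5 - 1 = 4.
# Note that the function currently only computes this value and does not return it.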
def _find_good_match_states(infr, ibs, edges):
pos_edges = list(infr.pos_graph.edges())
timedelta = ibs.get_annot_pair_timedelta(*zip(*edges))
edges = ut.take(pos_edges, ut.argsort(timedelta))[::-1]
wgt = infr.qt_edge_reviewer(edges)
neg_edges = ut.shuffle(list(infr.neg_graph.edges()))
wgt = infr.qt_edge_reviewer(neg_edges)
if infr.incomp_graph.number_of_edges() > 0:
incmp_edges = list(infr.incomp_graph.edges())
if False:
ibs = infr.ibs
# a1, a2 = map(ibs.annots, zip(*incmp_edges))
# q1 = np.array(ut.replace_nones(a1.qual, np.nan))
# q2 = np.array(ut.replace_nones(a2.qual, np.nan))
# edges = ut.compress(incmp_edges,
# ((q1 > 3) | np.isnan(q1)) &
# ((q2 > 3) | np.isnan(q2)))
# a = ibs.annots(asarray=True)
# flags = [t is not None and 'right' == t for t in a.viewpoint_code]
# r = a.compress(flags)
# flags = [q is not None and q > 4 for q in r.qual]
rights = ibs.filter_annots_general(view='right',
minqual='excellent',
require_quality=True,
require_viewpoint=True)
lefts = ibs.filter_annots_general(view='left',
minqual='excellent',
require_quality=True,
require_viewpoint=True)
if False:
edges = list(infr._make_rankings(3197, rights))
wgt = infr.qt_edge_reviewer(edges)
edges = list(ut.random_product((rights, lefts), num=10, rng=0))
wgt = infr.qt_edge_reviewer(edges)
for edge in incmp_edges:
match = infr._make_matches_from([edge])[0]
# infr._debug_edge_gt(edge)
def prepare_cdfs(cdfs, labels):
cdfs = vt.pad_vstack(cdfs, fill_value=1)
# Sort so the best is on top
sortx = np.lexsort(cdfs.T[::-1])[::-1]
cdfs = cdfs[sortx]
labels = ut.take(labels, sortx)
return cdfs, labels
def plot_cmcs(cdfs, labels, fnum=1, pnum=(1, 1, 1), ymin=.4):
cdfs, labels = prepare_cdfs(cdfs, labels)
# Truncate to 20 ranks
num_ranks = min(cdfs.shape[-1], 20)
xdata = np.arange(1, num_ranks + 1)
cdfs_trunc = cdfs[:, 0:num_ranks]
label_list = ['%6.3f%% - %s' % (cdf[0] * 100, lbl)
for cdf, lbl in zip(cdfs_trunc, labels)]
# ymin = .4
num_yticks = (10 - int(ymin * 10)) + 1
pt.multi_plot(
xdata, cdfs_trunc, label_list=label_list,
xlabel='rank', ylabel='match probability',
use_legend=True, legend_loc='lower right', num_yticks=num_yticks,
ymax=1, ymin=ymin, ypad=.005, xmin=.9, num_xticks=5,
xmax=num_ranks + 1 - .5,
pnum=pnum, fnum=fnum,
rcParams=TMP_RC,
)
return pt.gcf()
@ut.reloadable_class
class VerifierExpt(DBInputs):
"""
Collect data from experiments to visualize
python -m ibeis VerifierExpt.measure all PZ_Master1.GZ_Master1,GIRM_Master1,MantaMatcher,RotanTurtles,humpbacks_fb,LF_ALL
python -m ibeis VerifierExpt.measure all GIRM_Master1,PZ_Master1,LF_ALL
python -m ibeis VerifierExpt.measure all LF_ALL
python -m ibeis VerifierExpt.measure all PZ_Master1
python -m ibeis VerifierExpt.measure all MantaMatcher
python -m ibeis VerifierExpt.draw all MantaMatcher
python -m ibeis VerifierExpt.draw rerank PZ_Master1
python -m ibeis VerifierExpt.measure all RotanTurtles
python -m ibeis VerifierExpt.draw all RotanTurtles
Ignore:
>>> from ibeis.scripts.postdoc import *
>>> fpath = ut.glob(ut.truepath('~/Desktop/mtest_plots'), '*.pkl')[0]
>>> self = ut.load_data(fpath)
"""
# base_dpath = ut.truepath('~/Desktop/pair_expts')
base_dpath = ut.truepath('~/latex/crall-iccvw-2017/figures')
agg_dbnames = [
'PZ_Master1',
'GZ_Master1',
# 'LF_ALL',
'MantaMatcher', 'RotanTurtles',
'humpbacks_fb', 'GIRM_Master1',
]
task_nice_lookup = {
'match_state': const.EVIDENCE_DECISION.CODE_TO_NICE,
'photobomb_state': {
'pb': 'Photobomb',
'notpb': 'Not Photobomb',
}
}
def _setup(self, quick=False):
r"""
CommandLine:
python -m ibeis VerifierExpt._setup --db GZ_Master1
python -m ibeis VerifierExpt._setup --db PZ_Master1 --eval
python -m ibeis VerifierExpt._setup --db PZ_MTEST
python -m ibeis VerifierExpt._setup --db PZ_PB_RF_TRAIN
python -m ibeis VerifierExpt.measure_all --db PZ_PB_RF_TRAIN
python -m ibeis VerifierExpt.measure all GZ_Master1
python -m ibeis VerifierExpt.measure all RotanTurtles --show
Example:
>>> from ibeis.scripts.postdoc import *
>>> dbname = ut.get_argval('--db', default='GZ_Master1')
>>> self = VerifierExpt(dbname)
>>> self._setup()
Ignore:
from ibeis.scripts.postdoc import *
self = VerifierExpt('PZ_Master1')
from ibeis.scripts.postdoc import *
self = VerifierExpt('PZ_PB_RF_TRAIN')
from ibeis.scripts.postdoc import *
self = VerifierExpt('LF_ALL')
self = VerifierExpt('RotanTurtles')
task = pblm.samples.subtasks['match_state']
ind_df = task.indicator_df
dist = ibs.get_annotedge_viewdist(ind_df.index.tolist())
np.all(ind_df[dist > 1]['notcomp'])
self.ibs.print_annot_stats(aids, prefix='P')
"""
self._precollect()
print('VerifierExpt _setup()')
ibs = self.ibs
aids = self.aids_pool
# pblm = vsone.OneVsOneProblem.from_aids(ibs, aids, sample_method='random')
pblm = vsone.OneVsOneProblem.from_aids(
ibs, aids,
sample_method='lnbnn+random',
# sample_method='random',
n_splits=10,
)
data_key = 'learn(sum)' # tests without global features
# data_key = 'learn(sum,glob)' # tests with global features
# data_key = pblm.default_data_key # same as learn(sum,glob)
clf_key = pblm.default_clf_key
pblm.eval_task_keys = ['match_state']
# test with and without globals
pblm.eval_data_keys = ['learn(sum)', 'learn(sum,glob)']
# pblm.eval_data_keys = [data_key]
pblm.eval_clf_keys = [clf_key]
ibs = pblm.infr.ibs
# pblm.samples.print_info()
species_code = ibs.get_database_species(pblm.infr.aids)[0]
if species_code == 'zebra_plains':
species = 'Plains Zebras'
elif species_code == 'zebra_grevys':
species = 'Grévy\'s Zebras'
else:
species = species_code
self.pblm = pblm
self.species = species
self.data_key = data_key
self.clf_key = clf_key
if quick:
return
pblm.setup_evaluation(with_simple=True)
pblm.report_evaluation()
self.eval_task_keys = pblm.eval_task_keys
cfg_prefix = '{}'.format(len(pblm.samples))
config = pblm.hyper_params
self._setup_links(cfg_prefix, config)
print('Finished setup')
@classmethod
def agg_dbstats(VerifierExpt):
"""
CommandLine:
python -m ibeis VerifierExpt.agg_dbstats
python -m ibeis VerifierExpt.measure_dbstats
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.scripts.postdoc import * # NOQA
>>> result = VerifierExpt.agg_dbstats()
>>> print(result)
"""
dfs = []
for dbname in VerifierExpt.agg_dbnames:
self = VerifierExpt(dbname)
info = self.ensure_results('dbstats', nocompute=False)
sample_info = self.ensure_results('sample_info', nocompute=False)
# info = self.measure_dbstats()
outinfo = info['outinfo']
task = sample_info['subtasks']['match_state']
y_ind = task.indicator_df
outinfo['Positive'] = (y_ind[POSTV]).sum()
outinfo['Negative'] = (y_ind[NEGTV]).sum()
outinfo['Incomparable'] = (y_ind[INCMP]).sum()
if outinfo['Database'] == 'mantas':
outinfo['Database'] = 'manta rays'
dfs.append(outinfo)
# labels.append(self.species_nice.capitalize())
df = pd.DataFrame(dfs)
print('df =\n{!r}'.format(df))
df = df.set_index('Database')
df.index.name = None
tabular = Tabular(df, colfmt='numeric')
tabular.theadify = 16
enc_text = tabular.as_tabular()
print(enc_text)
ut.write_to(join(VerifierExpt.base_dpath, 'agg-dbstats.tex'), enc_text)
_ = ut.render_latex(enc_text, dpath=self.base_dpath, fname='agg-dbstats',
preamb_extra=['\\usepackage{makecell}'])
_
# ut.startfile(_)
@classmethod
def agg_results(VerifierExpt, task_key):
"""
python -m ibeis VerifierExpt.agg_results
python -m ibeis VerifierExpt.agg_results --link link-paper-final
GZ_Master1,LF_ALL,MantaMatcher,RotanTurtles,humpbacks_fb,GIRM_Master1
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.scripts.postdoc import * # NOQA
>>> task_key = 'match_state'
>>> result = VerifierExpt.agg_results(task_key)
>>> print(result)
"""
VerifierExpt.agg_dbstats()
dbnames = VerifierExpt.agg_dbnames
all_results = ut.odict([])
for dbname in VerifierExpt.agg_dbnames:
self = VerifierExpt(dbname)
info = self.ensure_results('all')
all_results[dbname] = info
rerank_results = ut.odict([])
for dbname in VerifierExpt.agg_dbnames:
self = VerifierExpt(dbname)
info = self.ensure_results('rerank')
rerank_results[dbname] = info
rank_curves = ub.AutoOrderedDict()
rank1_cmc_table = pd.DataFrame(columns=[LNBNN, CLF])
rank5_cmc_table = pd.DataFrame(columns=[LNBNN, CLF])
n_dbs = len(all_results)
color_cycle = mpl.rcParams['axes.prop_cycle'].by_key()['color'][:n_dbs]
color_cycle = ['r', 'b', 'purple', 'orange', 'deeppink', 'g']
markers = pt.distinct_markers(n_dbs)
dbprops = ub.AutoDict()
for n, dbname in enumerate(dbnames):
dbprops[dbname]['color'] = color_cycle[n]
dbprops[dbname]['marker'] = markers[n]
def highlight_metric(metric, data1, data2):
# Highlight the bigger one for each metric
for d1, d2 in it.permutations([data1, data2], 2):
text = '{:.3f}'.format(d1[metric])
if d1[metric] >= d2[metric]:
d1[metric + '_tex'] = '\\mathbf{' + text + '}'
d1[metric + '_text'] = text + '*'
else:
d1[metric + '_tex'] = text
d1[metric + '_text'] = text
for dbname in dbnames:
results = all_results[dbname]
data_key = results['data_key']
clf_key = results['clf_key']
lnbnn_data = results['lnbnn_data']
task_combo_res = results['task_combo_res']
res = task_combo_res[task_key][clf_key][data_key]
nice = dbname_to_species_nice(dbname)
# ranking results
results = rerank_results[dbname]
cdfs, infos = list(zip(*results))
lnbnn_cdf, clf_cdf = cdfs
cdfs = {
CLF: clf_cdf,
LNBNN: lnbnn_cdf,
}
rank1_cmc_table.loc[nice, LNBNN] = lnbnn_cdf[0]
rank1_cmc_table.loc[nice, CLF] = clf_cdf[0]
rank5_cmc_table.loc[nice, LNBNN] = lnbnn_cdf[4]
rank5_cmc_table.loc[nice, CLF] = clf_cdf[4]
# Check the ROC for only things in the top of the LNBNN ranked lists
# nums = [1, 2, 3, 4, 5, 10, 20, np.inf]
nums = [1, 5, np.inf]
for num in nums:
ranks = lnbnn_data['rank_lnbnn_1vM'].values
sub_data = lnbnn_data[ranks <= num]
scores = sub_data['score_lnbnn_1vM'].values
y = sub_data[POSTV].values
probs = res.probs_df[POSTV].loc[sub_data.index].values
cfsm_vsm = vt.ConfusionMetrics().fit(scores, y)
cfsm_clf = vt.ConfusionMetrics().fit(probs, y)
algo_confusions = {
LNBNN: cfsm_vsm,
CLF: cfsm_clf
}
datas = []
for algo in {LNBNN, CLF}:
cfms = algo_confusions[algo]
data = {
'dbname': dbname,
'species': nice,
'fpr': cfms.fpr,
'tpr': cfms.tpr,
'auc': cfms.auc,
'cmc0': cdfs[algo][0],
'cmc': cdfs[algo],
'color': dbprops[dbname]['color'],
'marker': dbprops[dbname]['marker'],
'tpr@fpr=0': cfms.get_metric_at_metric(
'tpr', 'fpr', 0, tiebreaker='minthresh'),
'thresh@fpr=0': cfms.get_metric_at_metric(
'thresh', 'fpr', 0, tiebreaker='minthresh'),
}
rank_curves[num][algo][dbname] = data
datas.append(data)
# Highlight the bigger one for each metric
highlight_metric('auc', *datas)
highlight_metric('tpr@fpr=0', *datas)
highlight_metric('cmc0', *datas)
rank_auc_tables = ut.ddict(lambda: | pd.DataFrame(columns=[LNBNN, CLF]) | pandas.DataFrame |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import json
import os
from warnings import warn
from collections import OrderedDict
import numpy as np
import pandas as pd
from IPython import embed
def sigm_tf(x):
return 1.0 / (1 + np.exp(-1 * x))
# def sigm(x):
# return 2./(1 + np.exp(-2 * x)) - 1
def flatten(l):
return [item for sublist in l for item in sublist]
def nn_dict_to_matlab(json_file):
newjs = {}
for key, val in json_file.items():
newjs[key.replace("/", "_").replace(":", "_")] = val
matdict = {", ".join(newjs["target_names"]): newjs}
return matdict
class QuaLiKizComboNN:
def __init__(self, target_names, nns, combo_func):
self._nns = nns
for nn in self._nns:
if np.any(nn._feature_names.ne(self._feature_names)):
raise Exception("Supplied NNs have different feature names")
if np.any(self._feature_min > self._feature_max):
raise Exception("Feature min > feature max")
self._combo_func = combo_func
self._target_names = pd.Series(target_names)
if not self._target_names.index.is_unique:
raise Exception("Non unique index for target_names!")
self._target_min = pd.Series(
self._combo_func(*[nn._target_min.values for nn in nns]),
index=self._target_names,
)
self._target_max = pd.Series(
self._combo_func(*[nn._target_max.values for nn in nns]),
index=self._target_names,
)
def get_output(
self,
input,
output_pandas=True,
clip_low=False,
clip_high=False,
low_bound=None,
high_bound=None,
safe=True,
**kwargs,
):
nn_input, safe, clip_low, clip_high, low_bound, high_bound = determine_settings(
self, input, safe, clip_low, clip_high, low_bound, high_bound
)
output = self._combo_func(
*[
nn.get_output(
nn_input,
output_pandas=False,
clip_low=False,
clip_high=False,
safe=safe,
**kwargs,
)
for nn in self._nns
]
)
output = clip_to_bounds(output, clip_low, clip_high, low_bound, high_bound)
if output_pandas is True:
output = pd.DataFrame(output, columns=self._target_names, index=input.index)
return output
@property
def _feature_names(self):
return self._nns[0]._feature_names
@property
def _feature_max(self):
feature_max = pd.Series(
np.full_like(self._nns[0]._feature_max, np.inf),
index=self._nns[0]._feature_max.index,
)
for nn in self._nns:
feature_max = nn._feature_max.combine(feature_max, min)
return feature_max
@property
def _feature_min(self):
feature_min = pd.Series(
np.full_like(self._nns[0]._feature_min, -np.inf),
index=self._nns[0]._feature_min.index,
)
for nn in self._nns:
feature_min = nn._feature_min.combine(feature_min, max)
return feature_min
class QuaLiKizSelfComboNN:
"""
Network output wrapper which applies specified operation on network outputs
upon evaluation. Differs from QuaLiKizComboNN in that this class applies
the operation to across a single multi-output NN instead of across multiple
single-output NNs.
:arg target_names: list. Specifies new output column names when using output_pandas option.
:arg nn: QuaLiKizNDNN. Neural network, this class only accepts a single network object.
:arg combo_func: callable. Operation applied to the NN outputs; it can take any number of arguments, provided the matching entry of indices lists that many target columns.
:arg indices: list. Specifies which of the original column names are passed to the operation function, for each new output column.
"""
def __init__(self, target_names, nn, combo_func, indices):
self._nn = nn
if np.any(self._feature_min > self._feature_max):
raise Exception("Feature min > feature max")
self._combo_func = combo_func
self._target_names = pd.Series(target_names)
if not self._target_names.index.is_unique:
raise Exception("Non unique index for target_names!")
for index in range(0, len(indices)):
for item in indices[index]:
if item is not None and item not in self._nn._target_names.values:
raise Exception("Requested operation on non-existant target_name!")
self._combo_indices = indices
if len(self._combo_indices) != len(self._target_names):
raise Exception("Number of target names and operations do not match")
target_min = []
target_max = []
for index in range(0, len(self._combo_indices)):
if None in self._combo_indices[index]:
target_min.append(self._nn._target_min[self._combo_indices[index][0]])
target_max.append(self._nn._target_max[self._combo_indices[index][0]])
else:
target_min.append(
self._combo_func(
*[self._nn._target_min[name] for name in self._combo_indices[index]]
)
)
target_max.append(
self._combo_func(
*[self._nn._target_max[name] for name in self._combo_indices[index]]
)
)
self._target_min = pd.Series(target_min, index=self._target_names)
self._target_max = pd.Series(target_max, index=self._target_names)
def get_output(
self,
input,
output_pandas=True,
clip_low=False,
clip_high=False,
low_bound=None,
high_bound=None,
safe=True,
**kwargs,
):
nn_input, safe, clip_low, clip_high, low_bound, high_bound = determine_settings(
self, input, safe, clip_low, clip_high, low_bound, high_bound
)
pre_output = self._nn.get_output(
nn_input, output_pandas=True, clip_low=False, clip_high=False, safe=safe, **kwargs
)
eval_out = []
for index in range(0, len(self._combo_indices)):
if None in self._combo_indices[index]:
eval_out.append(pre_output[self._combo_indices[index][0]].values)
else:
eval_out.append(
self._combo_func(
*[pre_output[name].values for name in self._combo_indices[index]]
)
)
output = np.hstack([np.transpose(np.atleast_2d(item)) for item in eval_out])
output = clip_to_bounds(output, clip_low, clip_high, low_bound, high_bound)
if output_pandas is True:
output = pd.DataFrame(output, columns=self._target_names, index=input.index)
return output
@property
def _feature_names(self):
return self._nn._feature_names
@property
def _feature_max(self):
return self._nn._feature_max
@property
def _feature_min(self):
return self._nn._feature_min
class QuaLiKizNDNN:
def __init__(self, nn_dict, target_names_mask=None, layer_mode=None, GB_scale_length=1):
"""General ND fully-connected multilayer perceptron neural network
Initialize this class using a nn_dict. This dict is usually read
directly from JSON, and has a specific structure. Generate this JSON
file using the supplied function in QuaLiKiz-Tensorflow
"""
parsed = {}
if layer_mode is None:
try:
import qlknn_intel
except:
layer_mode = "classic"
else:
layer_mode = "intel"
elif layer_mode == "intel":
import qlknn_intel
elif layer_mode == "cython":
import cython_mkl_ndnn
self.GB_scale_length = GB_scale_length
# Read and parse the json. E.g. put arrays in arrays and the rest in a dict
for name, value in nn_dict.items():
if name == "hidden_activation" or name == "output_activation":
parsed[name] = value
elif value.__class__ == list:
parsed[name] = np.array(value)
else:
parsed[name] = dict(value)
# These variables do not depend on the amount of layers in the NN
for set in ["feature", "target"]:
setattr(self, "_" + set + "_names", pd.Series(parsed.pop(set + "_names")))
for set in ["feature", "target"]:
for subset in ["min", "max"]:
setattr(
self,
"_".join(["", set, subset]),
pd.Series(parsed.pop("_".join([set, subset])))[
getattr(self, "_" + set + "_names")
],
)
for subset in ["bias", "factor"]:
setattr(
self,
"_".join(["_feature_prescale", subset]),
pd.Series(parsed["prescale_" + subset])[self._feature_names],
)
setattr(
self,
"_".join(["_target_prescale", subset]),
pd.Series(parsed.pop("prescale_" + subset))[self._target_names],
)
self.layers = []
# Now find out the amount of layers in our NN, and save the weigths and biases
activations = parsed["hidden_activation"] + [parsed["output_activation"]]
for ii in range(1, len(activations) + 1):
try:
name = "layer" + str(ii)
weight = parsed.pop(name + "/weights/Variable:0")
bias = parsed.pop(name + "/biases/Variable:0")
activation = activations.pop(0)
if layer_mode == "classic":
if activation == "tanh":
act = np.tanh
elif activation == "relu":
act = _act_relu
elif activation == "none":
act = _act_none
self.layers.append(QuaLiKizNDNN.NNLayer(weight, bias, act))
elif layer_mode == "intel":
self.layers.append(qlknn_intel.Layer(weight, bias, activation))
elif layer_mode == "cython":
self.layers.append(cython_mkl_ndnn.Layer(weight, bias, activation))
except KeyError:
# This name does not exist in the JSON,
# so our previously read layer was the output layer
break
if len(activations) == 0:
del parsed["hidden_activation"]
del parsed["output_activation"]
try:
self._clip_bounds = parsed["_metadata"]["clip_bounds"]
except KeyError:
self._clip_bounds = False
self._target_names_mask = target_names_mask
# Ignore metadata
try:
self._metadata = parsed.pop("_metadata")
except KeyError:
pass
# Ignore parsed settings
try:
self._parsed_settings = parsed.pop("_parsed_settings")
except KeyError:
pass
if any(parsed):
warn("nn_dict not fully parsed! " + str(parsed))
def apply_layers(self, input, output=None):
"""Apply all NN layers to the given input
The given input has to be array-like, but can be of size 1
"""
input = np.ascontiguousarray(input)
# 3x30 network:
# 14.1 µs ± 913 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
# 20.9 µs ± 2.43 µs per loop (mean ± std. dev. of 7 runs, 100000 loops each)
# 19.1 µs ± 240 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
# 2.67 µs ± 29.7 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
for layer in self.layers:
output = np.empty([input.shape[0], layer._weights.shape[1]])
output = layer.apply(input, output)
input = output
return input
class NNLayer:
"""A single (hidden) NN layer
A hidden NN layer just does
output = activation(weight * input + bias)
Where weight is generally a matrix; output, input and bias a vector
and activation a (sigmoid) function.
"""
def __init__(self, weight, bias, activation):
self._weights = weight
self._biases = bias
self._activation = activation
def apply(self, input, output=None):
preactivation = np.dot(input, self._weights) + self._biases
result = self._activation(preactivation)
return result
def shape(self):
return self._weights.shape
def __str__(self):
return "NNLayer shape " + str(self.shape())
def get_output(
self,
input,
clip_low=False,
clip_high=False,
low_bound=None,
high_bound=None,
safe=True,
output_pandas=True,
**kwargs,
):
"""Calculate the output given a specific input
In safe mode this function expects a pandas.DataFrame whose columns
include at least the feature_names; with safe=False it also accepts a
plain numpy array whose columns are ordered like feature_names.
"""
# 49.1 ns ± 1.53 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)
nn_input, safe, clip_low, clip_high, low_bound, high_bound = determine_settings(
self, input, safe, clip_low, clip_high, low_bound, high_bound
)
# nn_input = self._feature_prescale_factors.values[np.newaxis, :] * nn_input + self._feature_prescale_biases.values
# 14.3 µs ± 1.08 µs per loop (mean ± std. dev. of 7 runs, 100000 loops each)
nn_input = _prescale(
nn_input,
self._feature_prescale_factor.values,
self._feature_prescale_bias.values,
)
# Apply all NN layers an re-scale the outputs
# 104 µs ± 19.7 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)
# 70.9 µs ± 384 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each) (only apply layers)
output = (
self.apply_layers(nn_input) - np.atleast_2d(self._target_prescale_bias)
) / np.atleast_2d(self._target_prescale_factor)
# for name in self._target_names:
# nn_output = (np.squeeze(self.apply_layers(nn_input)) - self._target_prescale_biases[name]) / self._target_prescale_factors[name]
# output[name] = nn_output
scale_mask = [
not any(prefix in name for prefix in ["df", "chie", "xaxis"])
for name in self._target_names
]
if self.GB_scale_length != 1 and any(scale_mask):
output[:, scale_mask] /= self.GB_scale_length
output = clip_to_bounds(output, clip_low, clip_high, low_bound, high_bound)
# 118 µs ± 3.83 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)
if output_pandas:
output = pd.DataFrame(output, columns=self._target_names, index=input.index)
# 47.4 ns ± 1.79 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)
if self._target_names_mask is not None:
output.columns = self._target_names_mask
return output
@classmethod
def from_json(cls, json_file, **kwargs):
with open(json_file) as file_:
dict_ = json.load(file_)
nn = cls(dict_, **kwargs)
return nn
@property
def l2_norm(self):
l2_norm = 0
for layer in self.layers:
l2_norm += np.sum(np.square(layer._weights))
l2_norm /= 2
return l2_norm
@property
def l1_norm(self):
l1_norm = 0
for layer in self.layers:
l1_norm += np.sum(np.abs(layer._weights))
return l1_norm
class QuaLiKizLessDNN(QuaLiKizNDNN):
def __init__(
self,
nn_dict,
const_dict=None,
Zi=None,
target_names_mask=None,
layer_mode=None,
set_all_An_equal=True,
set_all_Ati_equal=True,
set_all_Ti_Te_equal=True,
normni_from_zeff=True,
):
self.set_all_An_equal = set_all_An_equal
self.set_all_Ati_equal = set_all_Ati_equal
self.set_all_Ti_Te_equal = set_all_Ti_Te_equal
self.normni_from_zeff = normni_from_zeff
self._internal_network = QuaLiKizNDNN(
nn_dict, target_names_mask=target_names_mask, layer_mode=layer_mode
)
self._feature_names = self._internal_network._feature_names
self._target_names = self._internal_network._target_names
self._Zi = Zi
self._feature_min = self._internal_network._feature_min
self._feature_max = self._internal_network._feature_max
self._target_min = self._internal_network._target_min
self._target_max = self._internal_network._target_max
for varname in ["An", "Ati", "Ti_Te"]:
if getattr(self, "set_all_" + varname + "_equal") and any(
name.startswith(varname) for name in self._internal_network._feature_names
):
is_subgroup = self._feature_names.apply(lambda x: x.startswith(varname))
setattr(self, "_" + varname + "_vars", self._feature_names.loc[is_subgroup])
subvars = getattr(self, "_" + varname + "_vars")
self._feature_names = self._feature_names.drop(subvars.index)
self._feature_names = self._feature_names.append(
pd.Series(varname, index=[self._feature_names.index.max() + 1])
)
for var, op in {"min": np.max, "max": np.min}.items():
subvals = getattr(self, "_feature_" + var)[subvars]
setattr(
self,
"_feature_" + var,
getattr(self, "_feature_" + var).drop(subvars),
)
setattr(
self,
"_feature_" + var,
getattr(self, "_feature_" + var).append(
pd.Series(op(subvals), index=[varname])
),
)
if self.normni_from_zeff and any(
name.startswith("normni") for name in self._internal_network._feature_names
):
if not "Zeff" in self._internal_network._feature_names.values:
raise Exception("normni_from_zeff is True, but network does not depend on Zeff")
self._normni_vars = self._feature_names.loc[
self._feature_names.apply(lambda x: x.startswith("normni"))
]
if len(self._normni_vars) > 2:
raise Exception(
"normni_from_zeff assumes two ions, but network depends on one, three or more"
)
if self._Zi is None or len(self._Zi) != 2:
raise Exception("normni_from_zeff is True, but no Zi of length two given")
self._feature_names = self._feature_names.drop(self._normni_vars.index)
self._feature_min = self._feature_min.drop(self._normni_vars)
self._feature_max = self._feature_max.drop(self._normni_vars)
self._const_dict = const_dict
for varname in self._const_dict:
self._feature_names = self._feature_names.drop(
self._feature_names[self._feature_names == varname].index
)
def get_output(
self,
input,
clip_low=False,
clip_high=False,
low_bound=None,
high_bound=None,
safe=True,
output_pandas=True,
**kwargs,
):
"""Calculate the output given a specific input
This wrapper only supports safe mode with a pandas.DataFrame input whose
columns include at least the (reduced) feature_names; other inputs raise
NotImplementedError.
"""
if not isinstance(input, pd.DataFrame) or not safe:
raise NotImplementedError("Unsafe mode or non-DataFrame input")
if clip_low or clip_high:
warn(
"Clipping of in/output not implemented in %s" % (type(self).__name__),
UserWarning,
)
old_setting = pd.options.mode.chained_assignment
pd.options.mode.chained_assignment = None
for varname in ["An", "Ati", "Ti_Te"]:
if varname in self._feature_names.values and getattr(
self, "set_all_" + varname + "_equal"
):
if varname in input:
for name in getattr(self, "_" + varname + "_vars"):
input.loc[:, name] = input.loc[:, varname]
else:
raise KeyError("{!s} not in input".format(varname))
if "Zeff" in self._internal_network._feature_names.values and self.normni_from_zeff:
normni0, normni1 = calculate_normni(self._Zi[0], self._Zi[1], input.loc[:, "Zeff"])
input.loc[:, "normni0"] = normni0
input.loc[:, "normni1"] = normni1
for varname, val in self._const_dict.items():
input.loc[:, varname] = val
pd.options.mode.chained_assignment = old_setting
fluxes = QuaLiKizNDNN.get_output(
self._internal_network,
input,
clip_low=clip_low,
clip_high=clip_high,
low_bound=low_bound,
high_bound=high_bound,
safe=safe,
output_pandas=output_pandas,
)
return fluxes
def calculate_normni(Z0, Z1, Zeff):
normni1 = (Zeff - Z0) / (Z1 ** 2 - Z1 * Z0)
normni0 = (1 - Z1 * normni1) / Z0
return normni0, normni1
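# The formulas above follow from quasi-neutrality and the Zeff definition for
# two ion species with charges Z0, Z1 and densities normalized to n_e:
#   Z0*normni0 + Z1*normni1 = 1
#   Z0**2*normni0 + Z1**2*normni1 = Zeff
# Solving this pair for normni0 and normni1 gives the expressions returned here.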
def clip_to_bounds(output, clip_low, clip_high, low_bound, high_bound):
if clip_low:
if isinstance(low_bound, (int, float)):
output[output < low_bound] = low_bound
else:
for ii, bound in enumerate(low_bound):
output[:, ii][output[:, ii] < bound] = bound
if clip_high:
if isinstance(high_bound, (int, float)):
output[output > high_bound] = high_bound
else:
for ii, bound in enumerate(high_bound):
output[:, ii][output[:, ii] > bound] = bound
return output
def determine_settings(network, input, safe, clip_low, clip_high, low_bound, high_bound):
if safe:
if isinstance(input, pd.DataFrame):
nn_input = input[network._feature_names]
else:
raise Exception("Please pass a pandas.DataFrame for safe mode")
if low_bound is not None:
low_bound = (
low_bound
if isinstance(low_bound, (int, float))
else low_bound.loc[network._target_names].values
)
if high_bound is not None:
high_bound = (
high_bound
if isinstance(high_bound, (int, float))
else high_bound.loc[network._target_names].values
)
else:
if input.__class__ == pd.DataFrame:
nn_input = input.values
elif input.__class__ == np.ndarray:
nn_input = input
if clip_low is True and (low_bound is None):
low_bound = network._target_min.values
if clip_high is True and (high_bound is None):
high_bound = network._target_max.values
return nn_input, safe, clip_low, clip_high, low_bound, high_bound
def _prescale(nn_input, factors, biases):
return np.atleast_2d(factors) * nn_input + np.atleast_2d(biases)
def _act_none(x):
return x
def _act_relu(x):
return x * (x > 0)
if __name__ == "__main__":
# Test the function
root = os.path.dirname(os.path.realpath(__file__))
# nn1 = QuaLiKizNDNN.from_json(os.path.join(root, 'nn_efe_GB.json'))
# nn2 = QuaLiKizNDNN.from_json(os.path.join(root, 'nn_efi_GB.json'))
# nn = QuaLiKizMultiNN([nn1, nn2])
nn_path = os.path.join(root, "../../tests/gen3_test_files/Network_874_efiITG_GB/nn.json")
nn = QuaLiKizNDNN.from_json(nn_path)
scann = 100
input = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.tslib as tslib
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import (DatetimeIndex, PeriodIndex, period_range, Series, Period,
_np_version_under1p10, Index, Timedelta, offsets)
from pandas.tests.test_base import Ops
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
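        # (illustrative aside, not part of the original test) the old set-op
        # behaviour is still available explicitly if a union is what is wanted:
        #     rng.union(other)  # combined PeriodIndex instead of element-wise "+"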
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [ | pd.offsets.Day(3) | pandas.offsets.Day |
import re
import numpy as np
import pytest
from pandas import Categorical, CategoricalIndex, DataFrame, Index, Series
import pandas._testing as tm
from pandas.core.arrays.categorical import recode_for_categories
from pandas.tests.arrays.categorical.common import TestCategorical
class TestCategoricalAPI:
def test_ordered_api(self):
# GH 9347
cat1 = Categorical(list("acb"), ordered=False)
tm.assert_index_equal(cat1.categories, Index(["a", "b", "c"]))
assert not cat1.ordered
cat2 = Categorical(list("acb"), categories=list("bca"), ordered=False)
tm.assert_index_equal(cat2.categories, Index(["b", "c", "a"]))
assert not cat2.ordered
cat3 = Categorical(list("acb"), ordered=True)
tm.assert_index_equal(cat3.categories, Index(["a", "b", "c"]))
assert cat3.ordered
cat4 = Categorical(list("acb"), categories=list("bca"), ordered=True)
tm.assert_index_equal(cat4.categories, Index(["b", "c", "a"]))
assert cat4.ordered
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
assert not cat2.ordered
cat2 = cat.as_ordered()
assert cat2.ordered
cat2.as_unordered(inplace=True)
assert not cat2.ordered
cat2.as_ordered(inplace=True)
assert cat2.ordered
assert cat2.set_ordered(True).ordered
assert not cat2.set_ordered(False).ordered
cat2.set_ordered(True, inplace=True)
assert cat2.ordered
cat2.set_ordered(False, inplace=True)
assert not cat2.ordered
# removed in 0.19.0
msg = "can't set attribute"
with pytest.raises(AttributeError, match=msg):
cat.ordered = True
with pytest.raises(AttributeError, match=msg):
cat.ordered = False
def test_rename_categories(self):
cat = Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
tm.assert_numpy_array_equal(
res.__array__(), np.array([1, 2, 3, 1], dtype=np.int64)
)
tm.assert_index_equal(res.categories, Index([1, 2, 3]))
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
exp_cat = Index(["a", "b", "c"])
tm.assert_index_equal(cat.categories, exp_cat)
# GH18862 (let rename_categories take callables)
result = cat.rename_categories(lambda x: x.upper())
expected = Categorical(["A", "B", "C", "A"])
tm.assert_categorical_equal(result, expected)
# and now inplace
res = cat.rename_categories([1, 2, 3], inplace=True)
assert res is None
tm.assert_numpy_array_equal(
cat.__array__(), np.array([1, 2, 3, 1], dtype=np.int64)
)
tm.assert_index_equal(cat.categories, Index([1, 2, 3]))
@pytest.mark.parametrize("new_categories", [[1, 2, 3, 4], [1, 2]])
def test_rename_categories_wrong_length_raises(self, new_categories):
cat = Categorical(["a", "b", "c", "a"])
msg = (
"new categories need to have the same number of items as the "
"old categories!"
)
with pytest.raises(ValueError, match=msg):
cat.rename_categories(new_categories)
def test_rename_categories_series(self):
# https://github.com/pandas-dev/pandas/issues/17981
c = Categorical(["a", "b"])
result = c.rename_categories(Series([0, 1], index=["a", "b"]))
expected = Categorical([0, 1])
tm.assert_categorical_equal(result, expected)
def test_rename_categories_dict(self):
# GH 17336
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 4, "b": 3, "c": 2, "d": 1})
expected = Index([4, 3, 2, 1])
tm.assert_index_equal(res.categories, expected)
# Test for inplace
res = cat.rename_categories({"a": 4, "b": 3, "c": 2, "d": 1}, inplace=True)
assert res is None
tm.assert_index_equal(cat.categories, expected)
# Test for dicts of smaller length
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 1, "c": 3})
expected = Index([1, "b", 3, "d"])
tm.assert_index_equal(res.categories, expected)
# Test for dicts with bigger length
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6})
expected = Index([1, 2, 3, 4])
tm.assert_index_equal(res.categories, expected)
# Test for dicts with no items from old categories
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"f": 1, "g": 3})
expected = Index(["a", "b", "c", "d"])
tm.assert_index_equal(res.categories, expected)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(
["a", "b", "c", "a"], categories=["c", "b", "a"], ordered=True
)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
tm.assert_categorical_equal(cat, old)
# only res is changed
tm.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
assert res is None
tm.assert_categorical_equal(cat, new)
@pytest.mark.parametrize(
"new_categories",
[
["a"], # not all "old" included in "new"
["a", "b", "d"], # still not all "old" in "new"
["a", "b", "c", "d"], # all "old" included in "new", but too long
],
)
def test_reorder_categories_raises(self, new_categories):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
msg = "items in new_categories are not the same as in old categories"
with pytest.raises(ValueError, match=msg):
cat.reorder_categories(new_categories)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(
["a", "b", "c", "a"], categories=["a", "b", "c", "d"], ordered=True
)
# first inplace == False
res = cat.add_categories("d")
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
tm.assert_categorical_equal(cat, new)
assert res is None
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
tm.assert_categorical_equal(res, expected)
def test_add_categories_existing_raises(self):
# new is in old categories
cat = Categorical(["a", "b", "c", "d"], ordered=True)
msg = re.escape("new categories must not include old categories: {'d'}")
with pytest.raises(ValueError, match=msg):
cat.add_categories(["d"])
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = Index(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
res = cat.set_categories(["c", "b", "a"], inplace=True)
tm.assert_index_equal(cat.categories, exp_categories)
tm.assert_numpy_array_equal(cat.__array__(), exp_values)
assert res is None
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
tm.assert_index_equal(cat.categories, exp_categories)
tm.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = Index(["a", "b", "c"])
tm.assert_index_equal(res.categories, exp_categories_back)
tm.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
tm.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0], dtype=np.int8))
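        # (note) -1 is the sentinel code pandas uses for missing values, so
        # categories dropped by set_categories surface as NaN in the values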
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
tm.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0], dtype=np.int8))
tm.assert_index_equal(res.categories, Index(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_index_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0], dtype=np.int8))
tm.assert_index_equal(c.categories, Index([1, 2, 3, 4]))
exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
tm.assert_numpy_array_equal(np.asarray(c), exp)
# all "pointers" to '4' must be changed from 3 to 0,...
c = c.set_categories([4, 3, 2, 1])
# positions are changed
tm.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3], dtype=np.int8))
# categories are now in new order
tm.assert_index_equal(c.categories, Index([4, 3, 2, 1]))
# output is the same
exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
tm.assert_numpy_array_equal(np.asarray(c), exp)
assert c.min() == 4
assert c.max() == 1
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
assert not c2.ordered
tm.assert_numpy_array_equal(np.asarray(c), np.asarray(c2))
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
assert not c2.ordered
tm.assert_numpy_array_equal(np.asarray(c), np.asarray(c2))
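    # (sketch, not from the original suite) the practical contrast with
    # rename_categories on a plain, assumed Categorical:
    #     c = Categorical(["a", "b"])
    #     c.set_categories(["b", "a"])     # values stay ["a", "b"]; only categories/codes reorder
    #     c.rename_categories(["b", "a"])  # positional relabel: values become ["b", "a"]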
def test_to_dense_deprecated(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
with tm.assert_produces_warning(FutureWarning):
cat.to_dense()
@pytest.mark.parametrize(
"values, categories, new_categories",
[
# No NaNs, same cats, same order
(["a", "b", "a"], ["a", "b"], ["a", "b"]),
# No NaNs, same cats, different order
(["a", "b", "a"], ["a", "b"], ["b", "a"]),
# Same, unsorted
(["b", "a", "a"], ["a", "b"], ["a", "b"]),
# No NaNs, same cats, different order
(["b", "a", "a"], ["a", "b"], ["b", "a"]),
# NaNs
(["a", "b", "c"], ["a", "b"], ["a", "b"]),
(["a", "b", "c"], ["a", "b"], ["b", "a"]),
(["b", "a", "c"], ["a", "b"], ["a", "b"]),
(["b", "a", "c"], ["a", "b"], ["a", "b"]),
# Introduce NaNs
(["a", "b", "c"], ["a", "b"], ["a"]),
(["a", "b", "c"], ["a", "b"], ["b"]),
(["b", "a", "c"], ["a", "b"], ["a"]),
(["b", "a", "c"], ["a", "b"], ["a"]),
# No overlap
(["a", "b", "c"], ["a", "b"], ["d", "e"]),
],
)
@pytest.mark.parametrize("ordered", [True, False])
def test_set_categories_many(self, values, categories, new_categories, ordered):
c = Categorical(values, categories)
expected = Categorical(values, new_categories, ordered)
result = c.set_categories(new_categories, ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_set_categories_rename_less(self):
# GH 24675
cat = Categorical(["A", "B"])
result = cat.set_categories(["A"], rename=True)
expected = Categorical(["A", np.nan])
tm.assert_categorical_equal(result, expected)
def test_set_categories_private(self):
cat = Categorical(["a", "b", "c"], categories=["a", "b", "c", "d"])
cat._set_categories(["a", "c", "d", "e"])
expected = Categorical(["a", "c", "d"], categories=list("acde"))
tm.assert_categorical_equal(cat, expected)
# fastpath
cat = Categorical(["a", "b", "c"], categories=["a", "b", "c", "d"])
cat._set_categories(["a", "c", "d", "e"], fastpath=True)
expected = Categorical(["a", "c", "d"], categories=list("acde"))
tm.assert_categorical_equal(cat, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"], ordered=True)
# first inplace == False
res = cat.remove_categories("c")
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
tm.assert_categorical_equal(cat, new)
assert res is None
@pytest.mark.parametrize("removals", [["c"], ["c", np.nan], "c", ["c", "c"]])
def test_remove_categories_raises(self, removals):
cat = Categorical(["a", "b", "a"])
message = re.escape("removals must all be in old categories: {'c'}")
with pytest.raises(ValueError, match=message):
cat.remove_categories(removals)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"], categories=["a", "b", "c", "d", "e"])
exp_categories_all = Index(["a", "b", "c", "d", "e"])
exp_categories_dropped = Index(["a", "b", "c", "d"])
tm.assert_index_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
| tm.assert_index_equal(res.categories, exp_categories_dropped) | pandas._testing.assert_index_equal |
import unittest
import pandas as pd
# fix to allow zip_longest on Python 2.X and 3.X
try: # Python 3
from itertools import zip_longest
except ImportError: # Python 2
from itertools import izip_longest as zip_longest
from math import fabs
from mock import patch, sentinel, Mock, MagicMock
from ib.ext.Contract import Contract
from ib.ext.Order import Order
from ib.ext.Execution import Execution
from ib.ext.OrderState import OrderState
from zipline.gens.brokers.ib_broker import IBBroker, TWSConnection
from zipline.testing.fixtures import WithSimParams
from zipline.finance.execution import (StopLimitOrder,
MarketOrder,
StopOrder,
LimitOrder)
from zipline.finance.order import ORDER_STATUS
from zipline.testing.fixtures import (ZiplineTestCase,
WithDataPortal)
@unittest.skip("Failing on CI - Fix later")
class TestIBBroker(WithSimParams,
WithDataPortal,
ZiplineTestCase):
ASSET_FINDER_EQUITY_SIDS = (1, 2)
ASSET_FINDER_EQUITY_SYMBOLS = ("SPY", "XIV")
@staticmethod
def _tws_bars():
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
tws = TWSConnection("localhost:9999:1111")
tws._add_bar('SPY', 12.4, 10,
pd.to_datetime('2017-09-27 10:30:00', utc=True),
10, 12.401, False)
tws._add_bar('SPY', 12.41, 10,
pd.to_datetime('2017-09-27 10:30:40', utc=True),
20, 12.411, False)
tws._add_bar('SPY', 12.44, 20,
pd.to_datetime('2017-09-27 10:31:10', utc=True),
40, 12.441, False)
tws._add_bar('SPY', 12.74, 5,
pd.to_datetime('2017-09-27 10:37:10', utc=True),
45, 12.741, True)
tws._add_bar('SPY', 12.99, 15,
pd.to_datetime('2017-09-27 12:10:00', utc=True),
60, 12.991, False)
tws._add_bar('XIV', 100.4, 100,
pd.to_datetime('2017-09-27 9:32:00', utc=True),
100, 100.401, False)
tws._add_bar('XIV', 100.41, 100,
pd.to_datetime('2017-09-27 9:32:20', utc=True),
200, 100.411, True)
tws._add_bar('XIV', 100.44, 200,
pd.to_datetime('2017-09-27 9:41:10', utc=True),
400, 100.441, False)
tws._add_bar('XIV', 100.74, 50,
pd.to_datetime('2017-09-27 11:42:10', utc=True),
450, 100.741, False)
return tws.bars
@staticmethod
def _create_contract(symbol):
contract = Contract()
contract.m_symbol = symbol
contract.m_secType = 'STK'
return contract
@staticmethod
def _create_order(action, qty, order_type, limit_price, stop_price):
order = Order()
order.m_action = action
order.m_totalQuantity = qty
order.m_auxPrice = stop_price
order.m_lmtPrice = limit_price
order.m_orderType = order_type
return order
@staticmethod
def _create_order_state(status_):
status = OrderState()
status.m_status = status_
return status
@staticmethod
def _create_exec_detail(order_id, shares, cum_qty, price, avg_price,
exec_time, exec_id):
exec_detail = Execution()
exec_detail.m_orderId = order_id
exec_detail.m_shares = shares
exec_detail.m_cumQty = cum_qty
exec_detail.m_price = price
exec_detail.m_avgPrice = avg_price
exec_detail.m_time = exec_time
exec_detail.m_execId = exec_id
return exec_detail
@patch('zipline.gens.brokers.ib_broker.TWSConnection')
def test_get_spot_value(self, tws):
dt = None # dt is not used in real broker
data_freq = 'minute'
asset = self.asset_finder.retrieve_asset(1)
bars = {'last_trade_price': [12, 10, 11, 14],
'last_trade_size': [1, 2, 3, 4],
'total_volume': [10, 10, 10, 10],
'vwap': [12.1, 10.1, 11.1, 14.1],
'single_trade_flag': [0, 1, 0, 1]}
last_trade_times = [pd.to_datetime('2017-06-16 10:30:00', utc=True),
pd.to_datetime('2017-06-16 10:30:11', utc=True),
pd.to_datetime('2017-06-16 10:30:30', utc=True),
pd.to_datetime('2017-06-17 10:31:9', utc=True)]
index = pd.DatetimeIndex(last_trade_times)
broker = IBBroker(sentinel.tws_uri)
tws.return_value.bars = {asset.symbol: pd.DataFrame(
index=index, data=bars)}
price = broker.get_spot_value(asset, 'price', dt, data_freq)
last_trade = broker.get_spot_value(asset, 'last_traded', dt, data_freq)
open_ = broker.get_spot_value(asset, 'open', dt, data_freq)
high = broker.get_spot_value(asset, 'high', dt, data_freq)
low = broker.get_spot_value(asset, 'low', dt, data_freq)
close = broker.get_spot_value(asset, 'close', dt, data_freq)
volume = broker.get_spot_value(asset, 'volume', dt, data_freq)
# Only the last minute is taken into account, therefore
# the first bar is ignored
assert price == bars['last_trade_price'][-1]
assert last_trade == last_trade_times[-1]
assert open_ == bars['last_trade_price'][1]
assert high == max(bars['last_trade_price'][1:])
assert low == min(bars['last_trade_price'][1:])
assert close == bars['last_trade_price'][-1]
assert volume == sum(bars['last_trade_size'][1:])
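        # (minimal sketch of the aggregation asserted above, assuming
        # bars_df = pd.DataFrame(index=index, data=bars) and that the broker simply
        # collapses the bars it considers into a single OHLCV row)
        #     window = bars_df.iloc[1:]
        #     open_, close = window.last_trade_price.iloc[0], window.last_trade_price.iloc[-1]
        #     high, low = window.last_trade_price.max(), window.last_trade_price.min()
        #     volume = window.last_trade_size.sum()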
def test_get_realtime_bars_produces_correct_df(self):
bars = self._tws_bars()
with patch('zipline.gens.brokers.ib_broker.TWSConnection'):
broker = IBBroker(sentinel.tws_uri)
broker._tws.bars = bars
assets = (self.asset_finder.retrieve_asset(1),
self.asset_finder.retrieve_asset(2))
realtime_history = broker.get_realtime_bars(assets, '1m')
asset_spy = self.asset_finder.retrieve_asset(1)
asset_xiv = self.asset_finder.retrieve_asset(2)
assert asset_spy in realtime_history
assert asset_xiv in realtime_history
spy = realtime_history[asset_spy]
xiv = realtime_history[asset_xiv]
assert list(spy.columns) == ['open', 'high', 'low', 'close', 'volume']
assert list(xiv.columns) == ['open', 'high', 'low', 'close', 'volume']
        # There are 159 one-minute bins, inclusive, between the first bar
        # (XIV @ 2017-09-27 9:32:00) and the last bar (SPY @ 2017-09-27 12:10:00)
assert len(realtime_history) == 159
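        # (illustrative check, not part of the original test) 159 is the inclusive
        # count of minute bins spanned by the bars:
        #     len(pd.date_range('2017-09-27 9:32', '2017-09-27 12:10', freq='T'))  # == 159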
spy_non_na = spy.dropna()
xiv_non_na = xiv.dropna()
assert len(spy_non_na) == 4
assert len(xiv_non_na) == 3
assert spy_non_na.iloc[0].name == pd.to_datetime(
'2017-09-27 10:30:00', utc=True)
assert spy_non_na.iloc[0].open == 12.40
assert spy_non_na.iloc[0].high == 12.41
assert spy_non_na.iloc[0].low == 12.40
assert spy_non_na.iloc[0].close == 12.41
assert spy_non_na.iloc[0].volume == 20
assert spy_non_na.iloc[1].name == pd.to_datetime(
'2017-09-27 10:31:00', utc=True)
assert spy_non_na.iloc[1].open == 12.44
assert spy_non_na.iloc[1].high == 12.44
assert spy_non_na.iloc[1].low == 12.44
assert spy_non_na.iloc[1].close == 12.44
assert spy_non_na.iloc[1].volume == 20
assert spy_non_na.iloc[-1].name == pd.to_datetime(
'2017-09-27 12:10:00', utc=True)
assert spy_non_na.iloc[-1].open == 12.99
assert spy_non_na.iloc[-1].high == 12.99
assert spy_non_na.iloc[-1].low == 12.99
assert spy_non_na.iloc[-1].close == 12.99
assert spy_non_na.iloc[-1].volume == 15
assert xiv_non_na.iloc[0].name == pd.to_datetime(
'2017-09-27 9:32:00', utc=True)
assert xiv_non_na.iloc[0].open == 100.4
assert xiv_non_na.iloc[0].high == 100.41
assert xiv_non_na.iloc[0].low == 100.4
assert xiv_non_na.iloc[0].close == 100.41
assert xiv_non_na.iloc[0].volume == 200
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_new_order_appears_in_orders(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-123')
broker._tws.nextValidId(0)
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
amount = -4
limit_price = 43.1
stop_price = 6
style = StopLimitOrder(limit_price=limit_price, stop_price=stop_price)
order = broker.order(asset, amount, style)
assert len(broker.orders) == 1
assert broker.orders[order.id] == order
assert order.open
assert order.asset == asset
assert order.amount == amount
assert order.limit == limit_price
assert order.stop == stop_price
assert (order.dt - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_orders_loaded_from_open_orders(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-<PASSWORD>')
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
ib_order_id = 3
ib_contract = self._create_contract(str(asset.symbol))
action, qty, order_type, limit_price, stop_price = \
'SELL', 40, 'STP LMT', 4.3, 2
ib_order = self._create_order(
action, qty, order_type, limit_price, stop_price)
ib_state = self._create_order_state('PreSubmitted')
broker._tws.openOrder(ib_order_id, ib_contract, ib_order, ib_state)
assert len(broker.orders) == 1
zp_order = list(broker.orders.values())[-1]
assert zp_order.broker_order_id == ib_order_id
assert zp_order.status == ORDER_STATUS.HELD
assert zp_order.open
assert zp_order.asset == asset
assert zp_order.amount == -40
assert zp_order.limit == limit_price
assert zp_order.stop == stop_price
assert (zp_order.dt - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_orders_loaded_from_exec_details(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-123')
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
(req_id, ib_order_id, shares, cum_qty,
price, avg_price, exec_time, exec_id) = (7, 3, 12, 40,
12.43, 12.50,
'20160101 14:20', 4)
ib_contract = self._create_contract(str(asset.symbol))
exec_detail = self._create_exec_detail(
ib_order_id, shares, cum_qty, price, avg_price,
exec_time, exec_id)
broker._tws.execDetails(req_id, ib_contract, exec_detail)
assert len(broker.orders) == 1
zp_order = list(broker.orders.values())[-1]
assert zp_order.broker_order_id == ib_order_id
assert zp_order.open
assert zp_order.asset == asset
assert zp_order.amount == -40
        # the exec detail above carries no limit/stop information, so those
        # attributes are not asserted here
assert (zp_order.dt - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_orders_updated_from_order_status(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-123')
broker._tws.nextValidId(0)
# orderStatus calls only work if a respective order has been created
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
amount = -4
limit_price = 43.1
stop_price = 6
style = StopLimitOrder(limit_price=limit_price, stop_price=stop_price)
order = broker.order(asset, amount, style)
ib_order_id = order.broker_order_id
status = 'Filled'
filled = 14
remaining = 9
avg_fill_price = 12.4
perm_id = 99
parent_id = 88
last_fill_price = 12.3
client_id = 1111
why_held = ''
broker._tws.orderStatus(ib_order_id,
status, filled, remaining, avg_fill_price,
perm_id, parent_id, last_fill_price, client_id,
why_held)
assert len(broker.orders) == 1
zp_order = list(broker.orders.values())[-1]
assert zp_order.broker_order_id == ib_order_id
assert zp_order.status == ORDER_STATUS.FILLED
assert not zp_order.open
assert zp_order.asset == asset
assert zp_order.amount == amount
assert zp_order.limit == limit_price
assert zp_order.stop == stop_price
assert (zp_order.dt - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_multiple_orders(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-123')
broker._tws.nextValidId(0)
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
order_count = 0
for amount, order_style in [
(-112, StopLimitOrder(limit_price=9, stop_price=1)),
(43, LimitOrder(limit_price=10)),
(-99, StopOrder(stop_price=8)),
(-32, MarketOrder())]:
order = broker.order(asset, amount, order_style)
order_count += 1
assert order_count == len(broker.orders)
assert broker.orders[order.id] == order
is_buy = amount > 0
assert order.stop == order_style.get_stop_price(is_buy)
assert order.limit == order_style.get_limit_price(is_buy)
def test_order_ref_serdes(self):
        # Even though _create_order_ref and _parse_order_ref are private,
        # they are worth testing as they play a key role in re-creating orders
order = self._create_order("BUY", 66, "STP LMT", 13.4, 44.2)
serialized = IBBroker._create_order_ref(order)
deserialized = IBBroker._parse_order_ref(serialized)
assert deserialized['action'] == order.m_action
assert deserialized['qty'] == order.m_totalQuantity
assert deserialized['order_type'] == order.m_orderType
assert deserialized['limit_price'] == order.m_lmtPrice
assert deserialized['stop_price'] == order.m_auxPrice
assert (deserialized['dt'] - pd.to_datetime('now', utc=True) <
| pd.Timedelta('10s') | pandas.Timedelta |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 25 16:14:45 2021
@author: bdobson
"""
import os
import pandas as pd
import geopandas as gpd
from matplotlib import pyplot as plt
root = os.path.join("C:\\", "Users", "bdobson", "Documents", "GitHub", "cwsd_sewer","data")
catchment = "cranbrook"
cluster = 'cluster_Louv_266'
rain = "august"
dt = "sim_dt_30_s"
cluster_root = os.path.join(root,catchment,"results","2021-03-02",cluster,dt)
results_root = os.path.join(cluster_root, rain)
plots_root = os.path.join(root, catchment, "results", "2021-03-02", "plots")
info_fid = os.path.join(results_root, "highfid_flows.gzip")
flow_fid = os.path.join(results_root, "flows.gzip")
dep_fid = os.path.join(results_root, "depths.gzip")
info_df = pd.read_parquet(info_fid).set_index('time')
info_df.index = pd.to_datetime(info_df.index)
flow_df = pd.read_parquet(flow_fid)
flow_df.index = pd.to_datetime(flow_df.index)
edges_gdf = gpd.read_file(os.path.join(cluster_root, "compartment_edges.geojson"))
nodes_gdf = gpd.read_file(os.path.join(cluster_root, "compartment_nodes.geojson"))
info_fid = os.path.join(results_root, "highfid_nodes.gzip")
node_fid = os.path.join(results_root, "storages.gzip")
infon_df = pd.read_parquet(info_fid).set_index('time')
infon_df.index = pd.to_datetime(infon_df.index)
# infon_df['volume'] = infon_df['volume'] + infon_df['floodvol']
node_df = pd.read_parquet(node_fid)
node_df.index = pd.to_datetime(node_df.index)
node_df.node = node_df.node.str.replace('-sewer','').astype(int)
dep_df = pd.read_parquet(dep_fid)
dep_df.index = pd.to_datetime(dep_df.index)
dep_df.node = dep_df.node.str.replace('-sewer','').astype(int)
flood_fid = os.path.join(results_root, "flood_vol.gzip")
flood_df = pd.read_parquet(flood_fid)
flood_df.index = pd.to_datetime(flood_df.index)
flood_df.node = flood_df.node.str.replace('-land','').astype(int)
def plot_arc(names, dr):
f, axs = plt.subplots(len(names),1,figsize=(5,7.5))
# for i, name in enumerate(names):
# ax = axs[i,1]
for name, ax in zip(names, axs):
nse = pd.merge(flow_df.loc[flow_df.arc == name, 'flow_out'].rolling('300s').mean().reset_index(), info_df.loc[info_df.info_id == name,'val'].reset_index(), left_on = 'date', right_on = 'time')
nse = nse.dropna()
nse = 1 - sum((nse.flow_out - nse.val)**2) / sum((nse.val - nse.val.mean())**2)
print(nse)
flow_df.loc[(flow_df.arc == name) & (flow_df.index.isin(pd.date_range(dr[0],dr[1],freq='60s'))), ['flow_out']].plot(color = ['c'], ax=ax)
# flow_df.loc[(flow_df.arc == name) & (flow_df.index.isin(pd.date_range(dr[0],dr[1],freq='60s'))), ['flow_out']].plot(color = ['c'], ax=ax)
info_ss = info_df.loc[(info_df.info_id == name) & (info_df.index.isin(pd.date_range(dr[0],dr[1],freq='60s'))), 'val']
info_ss = info_ss.resample('60s').mean().fillna(0)
info_ss.plot(color = 'k', ax=ax,marker='.',linestyle='',markersize=1)
ax.set_xlim([dr[0],dr[1]])
if name != names[-1]:
ax.set_xlabel('')
ax.set_xticklabels([])
ax.set_xticks([])
else:
ax.set_xlabel('Time (min)')
# ax = axs[i,0]
# flow_df.loc[flow_df.arc == name, ['flow_out']].rolling('300s').mean().plot(color = ['c'], ax=ax)
# info_df.loc[info_df.info_id == name, 'val'].resample('60s').mean().fillna(0).plot(color = 'k', ax=ax,marker='.',linestyle='',markersize=1)
# mm = flow_df.loc[(flow_df.arc == name) & (flow_df.index.isin(pd.date_range(dr[0],dr[1],freq='60s'))), 'flow_out'].max()
ax.set_ylabel('Flow (m3/min)')
if name != names[-1]:
ax.set_xlabel('')
ax.set_xticklabels([])
ax.set_xticks([])
else:
ax.set_xlabel('Time (min)')
ax.legend(['CWSD','InfoWorks'])
# ax.set_ylim([0, mm*1.75])
f.tight_layout()
return f
def plot_node(names, df, info_lab,dr):
f, axs = plt.subplots(len(names),1,figsize=(5,7.5))
for name, ax in zip(names,axs):
nse = pd.merge(df.loc[df.node == name, 'val'].reset_index(), infon_df.loc[infon_df[cluster] == name,'volume'].reset_index(), left_on = 'date', right_on = 'time')
nse = nse.dropna()
nse = 1 - sum((nse.val - nse.volume)**2) / sum((nse.volume- nse.volume.mean())**2)
print(nse)
df.loc[(df.node == name) & (df.index.isin(pd.date_range(dr[0],dr[1],freq='60s'))), 'val'].plot(color = 'c', ax=ax)
infon_ss = infon_df.loc[(infon_df[cluster] == name) & (infon_df.index.isin(pd.date_range(dr[0],dr[1],freq='60s'))), info_lab]
infon_ss = infon_ss.resample('60s').mean().fillna(0)
infon_ss.plot(color = 'k', ax=ax,marker='.',linestyle='',markersize=1)
# mm = df.loc[(df.node == name) & (df.index.isin(pd.date_range(dr[0],dr[1],freq='30s'))), 'val'].max()
ax.set_ylabel('Volume (m3)')
if name != names[-1]:
ax.set_xlabel('')
ax.set_xticklabels([])
ax.set_xticks([])
else:
ax.set_xlabel('Time (min)')
ax.legend(['CWSD','InfoWorks'])
ax.set_xlim([dr[0],dr[1]])
# ax.set_ylim([0, mm*1.3])
f.tight_layout()
return f
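# (refactor sketch, not in the original script) the Nash-Sutcliffe efficiency is
# computed inline in both plot functions; it could live in a single helper:
#     def nse(sim, obs):
#         return 1 - ((sim - obs) ** 2).sum() / ((obs - obs.mean()) ** 2).sum()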
# f = plot_arc(['node_106.1','node_817.1','node_1439.1'], )
f = plot_arc(['node_106.1','node_817.2','node_1753.1'], [pd.to_datetime('2017-08-07 00:00'), pd.to_datetime('2017-08-14 00:00')])#.savefig(os.path.join(plots_root, "flow_example.svg"))
mdf = pd.merge(node_df.reset_index(), flood_df.reset_index(),on=['date','node']).set_index('date')
mdf['val'] = mdf.val_x + mdf.val_y
f = plot_node([1,20,17], mdf, 'volume', [pd.to_datetime('2017-08-07 00:00'), | pd.to_datetime('2017-08-14 00:00') | pandas.to_datetime |
import collections
import errno
import logging
import os
import re
import shutil
import uuid
import time
import traceback
import sys
import pandas as pd
import numpy as np
from openpyxl import load_workbook
from xlrd.biffh import XLRDError
from sklearn import preprocessing
from skbio.stats.composition import ilr, clr
from skbio import DistanceMatrix
from skbio.stats.distance import anosim, permanova, permdisp, pwmantel
import scipy.spatial.distance as dist
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.spatial.distance import pdist
import rpy2.robjects.packages as rpackages
import rpy2.robjects as ro
from rpy2.robjects import pandas2ri, numpy2ri
from rpy2.robjects.conversion import localconverter
import plotly.graph_objects as go
from plotly.offline import plot
import plotly.express as px
from installed_clients.DataFileUtilClient import DataFileUtil
from GenericsAPI.Utils.AttributeUtils import AttributesUtil
from GenericsAPI.Utils.SampleServiceUtil import SampleServiceUtil
from GenericsAPI.Utils.DataUtil import DataUtil
import GenericsAPI.Utils.MatrixValidation as vd
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.fba_toolsClient import fba_tools
from installed_clients.kb_GenericsReportClient import kb_GenericsReport
from installed_clients.SampleServiceClient import SampleService
TYPE_ATTRIBUTES = {'description', 'scale', 'row_normalization', 'col_normalization'}
SCALE_TYPES = {'raw', 'ln', 'log2', 'log10'}
class MatrixUtil:
def _validate_import_matrix_from_excel_params(self, params):
"""
_validate_import_matrix_from_excel_params:
validates params passed to import_matrix_from_excel method
"""
logging.info('start validating import_matrix_from_excel params')
# check for required parameters
for p in ['obj_type', 'matrix_name', 'workspace_name', 'scale']:
if p not in params:
raise ValueError('"{}" parameter is required, but missing'.format(p))
obj_type = params.get('obj_type')
if obj_type not in self.matrix_types:
raise ValueError('Unknown matrix object type: {}'.format(obj_type))
scale = params.get('scale')
if scale not in SCALE_TYPES:
raise ValueError('Unknown scale type: {}'.format(scale))
if params.get('input_file_path'):
file_path = params.get('input_file_path')
elif params.get('input_shock_id'):
file_path = self.dfu.shock_to_file(
{'shock_id': params['input_shock_id'],
'file_path': self.scratch}).get('file_path')
elif params.get('input_staging_file_path'):
file_path = self.dfu.download_staging_file(
{'staging_file_subdir_path': params.get('input_staging_file_path')}
).get('copy_file_path')
else:
error_msg = "Must supply either a input_shock_id or input_file_path "
error_msg += "or input_staging_file_path"
raise ValueError(error_msg)
refs = {k: v for k, v in params.items() if "_ref" in k}
return (obj_type, file_path, params.get('workspace_name'),
params.get('matrix_name'), refs, scale)
def _upload_to_shock(self, file_path):
"""
_upload_to_shock: upload target file to shock using DataFileUtil
"""
logging.info('Start uploading file to shock: {}'.format(file_path))
file_to_shock_params = {
'file_path': file_path,
'pack': 'zip'
}
shock_id = self.dfu.file_to_shock(file_to_shock_params).get('shock_id')
return shock_id
@staticmethod
def _mkdir_p(path):
"""
_mkdir_p: make directory for given path
"""
if not path:
return
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
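    # (note) on Python 3 the same effect is available more directly via
    # os.makedirs(path, exist_ok=True); the try/except above simply re-raises
    # anything other than an already-existing directory.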
@staticmethod
def _find_between(s, start, end):
"""
_find_between: find string in between start and end
"""
return re.search('{}(.*){}'.format(start, end), s).group(1)
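    # (caveat) start and end are interpolated into the pattern unescaped, so this
    # helper assumes they contain no regex metacharacters; wrapping them in
    # re.escape() would make it robust to arbitrary delimiters.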
@staticmethod
def _write_mapping_sheet(file_path, sheet_name, mapping, index):
"""
_write_mapping_sheet: write mapping to sheet
"""
df_dict = collections.OrderedDict()
df_dict[index[0]] = []
df_dict[index[1]] = []
for key, value in mapping.items():
df_dict.get(index[0]).append(key)
df_dict.get(index[1]).append(value)
df = pd.DataFrame.from_dict(df_dict)
with pd.ExcelWriter(file_path, engine='openpyxl') as writer:
writer.book = load_workbook(file_path)
df.to_excel(writer, sheet_name=sheet_name)
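    # (note) assigning writer.book is the older openpyxl idiom for appending a
    # sheet to a workbook that already exists on disk; newer pandas exposes the
    # same intent via pd.ExcelWriter(file_path, engine='openpyxl', mode='a').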
def _generate_tab_content(self, index_page, viewer_name):
tab_content = ''
if index_page:
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
tab_content += '\n<iframe height="1300px" width="100%" '
tab_content += 'src="{}" '.format(index_page)
tab_content += 'style="border:none;"></iframe>'
tab_content += '\n</div>\n'
else:
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
tab_content += '''\n<p style="color:red;" >'''
tab_content += '''Matrix is too large to be displayed.</p>\n'''
tab_content += '\n</div>\n'
return tab_content
def _generate_simper_tab_content(self, res, viewer_name):
tab_content = ''
tab_content += '''\n<div id="{}" class="tabcontent">\n'''.format(viewer_name)
html = '''<pre class="tab">''' + str(res).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '\n</div>\n'
return tab_content
def _generate_variable_stat_tab_content(self, res, viewer_name):
tab_content = ''
tab_content += '''\n<div id="{}" class="tabcontent">\n'''.format(viewer_name)
tab_content += '''<table>\n'''
for key, value in res.items():
tab_content += '''<tr>\n'''
tab_content += '''<td>{}</td>\n'''.format(key)
tab_content += '''<td>{}</td>\n'''.format(value)
tab_content += '''</tr>\n'''
tab_content += '''</table>\n'''
tab_content += '\n</div>\n'
return tab_content
def _generate_mantel_test_visualization_content(self, pwmantel_res):
tab_def_content = ''
tab_content = ''
viewer_name = 'pwmantel_res'
tab_def_content += '''\n<div class="tab">\n'''
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
tab_def_content += '''>Mantel Test</button>\n'''
tab_def_content += '\n</div>\n'
tab_content += '''\n<div id="{}" class="tabcontent">\n'''.format(viewer_name)
tab_content += '''<table>\n'''
# add table headers
tab_content += '''<tr>\n'''
tab_content += '''<th>Distance Matrix 1</th>\n'''
tab_content += '''<th>Distance Matrix 2</th>\n'''
for col in pwmantel_res.columns:
tab_content += '''<th>{}</th>\n'''.format(col)
tab_content += '''</tr>\n'''
# add table contents
for idx, values in enumerate(pwmantel_res.values):
tab_content += '''<tr>\n'''
tab_content += '''<td>{}</td>\n'''.format(pwmantel_res.index[idx][0])
tab_content += '''<td>{}</td>\n'''.format(pwmantel_res.index[idx][1])
values[0] = round(values[0], 4)
for value in values:
tab_content += '''<td>{}</td>\n'''.format(value)
tab_content += '''</tr>\n'''
tab_content += '''</table>\n'''
tab_content += '\n</div>\n'
return tab_def_content + tab_content
def _generate_simper_plot(self, species_stats, grouping_names):
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
logging.info('Start generating plotly simper plot in {}'.format(output_directory))
self._mkdir_p(output_directory)
simper_plot_path = os.path.join(output_directory, 'SimperPlot.html')
species = list(species_stats.keys())
plot_data = list()
for grouping_name in set(grouping_names):
y_values = list()
y_error = list()
for species_name in species:
species_data = species_stats[species_name]
y_values.append(species_data[grouping_name][0])
y_error.append(species_data[grouping_name][1])
plot_data.append(go.Bar(name=str(grouping_name), x=species, y=y_values,
error_y=dict(type='data', array=y_error)))
fig = go.Figure(data=plot_data)
fig.update_layout(barmode='group',
xaxis=dict(title='species'),
yaxis=dict(title='average abundance count'))
plot(fig, filename=simper_plot_path)
return simper_plot_path
def _generate_simper_plot_content(self, viewer_name, species_stats, grouping_names,
output_directory):
simper_plot_path = self._generate_simper_plot(species_stats, grouping_names)
simper_plot_name = 'SimperPlot.html'
shutil.copy2(simper_plot_path,
os.path.join(output_directory, simper_plot_name))
tab_content = ''
tab_content += '''\n<div id="{}" class="tabcontent">\n'''.format(viewer_name)
tab_content += '<iframe height="500px" width="100%" '
tab_content += 'src="{}" '.format(simper_plot_name)
tab_content += 'style="border:none;"></iframe>\n<p></p>\n'
tab_content += '\n</div>\n'
return tab_content
def _generate_simper_visualization_content(self, simper_ret, simper_sum,
species_stats, grouping_names, output_directory):
tab_def_content = ''
tab_content = ''
viewer_name = 'simper_plot'
tab_def_content += '''\n<div class="tab">\n'''
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
tab_def_content += '''>Most Influential Species Bar Plot</button>\n'''
tab_content += self._generate_simper_plot_content(viewer_name, species_stats,
grouping_names, output_directory)
viewer_name = 'simper_ret'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Most Influential Species Info</button>\n'''
tab_content += self._generate_simper_tab_content(simper_ret, viewer_name)
viewer_name = 'simper_sum'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Similarity Percentage Summary</button>\n'''
tab_content += self._generate_simper_tab_content(simper_sum, viewer_name)
tab_def_content += '\n</div>\n'
return tab_def_content + tab_content
def _generate_variable_stats_visualization_content(self, anosim_res,
permanova_res, permdisp_res):
tab_def_content = ''
tab_content = ''
first_tab_token = False
if anosim_res is not None:
viewer_name = 'anosim_res'
first_tab_token = True
tab_def_content += '''\n<div class="tab">\n'''
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
tab_def_content += '''>Analysis of Similarities</button>\n'''
tab_content += self._generate_variable_stat_tab_content(anosim_res, viewer_name)
if permanova_res is not None:
viewer_name = 'permanova_res'
if first_tab_token:
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Permutational Multivariate Analysis of Variance</button>\n'''
else:
first_tab_token = True
tab_def_content += '''\n<div class="tab">\n'''
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
tab_def_content += '''>Permutational Multivariate Analysis of Variance</button>\n'''
tab_content += self._generate_variable_stat_tab_content(permanova_res, viewer_name)
if permdisp_res is not None:
viewer_name = 'permdisp_res'
if first_tab_token:
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Homogeneity Multivariate Analysis of Variance</button>\n'''
else:
# first_tab_token = True
tab_def_content += '''\n<div class="tab">\n'''
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
tab_def_content += '''>Homogeneity Multivariate Analysis of Variance</button>\n'''
tab_content += self._generate_variable_stat_tab_content(permdisp_res, viewer_name)
tab_def_content += '\n</div>\n'
return tab_def_content + tab_content
def _generate_rarefy_visualization_content(self, output_directory,
rarefied_matrix_dir, rarecurve_image,
obs_vs_rare_image, random_rare_df):
tab_def_content = ''
tab_content = ''
row_data_summary = random_rare_df.T.describe().round(2).to_string()
col_data_summary = random_rare_df.describe().round(2).to_string()
tab_def_content = ''
tab_content = ''
viewer_name = 'data_summary'
tab_def_content += '''\n<div class="tab">\n'''
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
tab_def_content += '''>Rarefied Matrix Statistics</button>\n'''
tab_content += '''\n<div id="{}" class="tabcontent" style="overflow:auto">'''.format(
viewer_name)
tab_content += '''\n<h5>Rarefied Matrix Size: {} x {}</h5>'''.format(
len(random_rare_df.index),
len(random_rare_df.columns))
tab_content += '''\n<h5>Row Aggregating Statistics</h5>'''
html = '''\n<pre class="tab">''' + str(row_data_summary).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '''\n<br>'''
tab_content += '''\n<hr style="height:2px;border-width:0;color:gray;background-color:gray">'''
tab_content += '''\n<br>'''
tab_content += '''\n<h5>Column Aggregating Statistics</h5>'''
html = '''\n<pre class="tab">''' + str(col_data_summary).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '\n</div>\n'
if False and len(random_rare_df.columns) <= 200:
viewer_name = 'MatrixLinearPlotViewer'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Matrix Linear Plot</button>\n'''
linear_plot_page = self._generate_linear_plot(random_rare_df, output_directory)
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
tab_content += '\n<iframe height="1300px" width="100%" '
tab_content += 'src="{}" '.format(linear_plot_page)
tab_content += 'style="border:none;"></iframe>'
tab_content += '\n</div>\n'
viewer_name = 'RarefiedMatrixViewer'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Rarefied Matrix Heatmap</button>\n'''
rarefied_matrix_report_files = os.listdir(rarefied_matrix_dir)
rarefied_matrix_index_page = None
for rarefied_matrix_report_file in rarefied_matrix_report_files:
if rarefied_matrix_report_file.endswith('.html'):
rarefied_matrix_index_page = rarefied_matrix_report_file
shutil.copy2(os.path.join(rarefied_matrix_dir, rarefied_matrix_report_file),
output_directory)
tab_content += self._generate_tab_content(rarefied_matrix_index_page, viewer_name)
rarecurve_image_name = os.path.basename(rarecurve_image)
shutil.copy2(rarecurve_image,
os.path.join(output_directory, rarecurve_image_name))
obs_vs_rare_image_name = os.path.basename(obs_vs_rare_image)
shutil.copy2(obs_vs_rare_image,
os.path.join(output_directory, obs_vs_rare_image_name))
viewer_name = 'RarecurvePlot'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Rarecurve Plot</button>\n'''
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
tab_content += '''\n<img src="{}" '''.format(rarecurve_image_name)
tab_content += '''alt="rarecurve" width="600" height="600">\n'''
tab_content += '''<br>\n<br>\n'''
tab_content += '''\n<img src="{}" '''.format(obs_vs_rare_image_name)
tab_content += '''alt="rarecurve" width="600" height="600">\n'''
tab_content += '\n</div>\n'
tab_def_content += '\n</div>\n'
return tab_def_content + tab_content
def _generate_trans_visualization_content(self, output_directory,
operations, heatmap_html_dir_l,
transformed_matrix_df, variable_specific):
row_data_summary = transformed_matrix_df.T.describe().round(2).to_string()
col_data_summary = transformed_matrix_df.describe().round(2).to_string()
tab_def_content = ''
tab_content = ''
op_2_name = {
'abundance_filtering': 'Filtered',
'standardization': 'Standardized',
'ratio_transformation': 'Log Ratio Transformed',
'relative_abundance': 'Relative Abundance',
'logit': 'Logit',
'sqrt': 'Square Root',
'log': 'Log',
}
## Start tabs ##
tab_def_content += '''\n<div class="tab">\n'''
## Operations tabs ##
for i, (op, heatmap_html_dir) in enumerate(zip(operations, heatmap_html_dir_l)):
viewer_name = 'op%s_%s' % (i, op)
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '%s')"''' % viewer_name
tab_def_content += '''>%d. %s</button>\n''' % (i+1, op_2_name[op])
flnms = os.listdir(heatmap_html_dir)
heatmap_html_flnm = None
for flnm in flnms:
if flnm.endswith('.html'):
heatmap_html_flnm = flnm
shutil.copy2(os.path.join(heatmap_html_dir, flnm), output_directory)
tab_content += self._generate_tab_content(heatmap_html_flnm, viewer_name)
## Transformed matrix statistics tab ##
viewer_name = 'data_summary'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
if variable_specific:
tab_def_content += '''>Transformed Selected Variables Statistics</button>\n'''
else:
tab_def_content += '''>Transformed Matrix Statistics</button>\n'''
tab_content += '''\n<div id="{}" class="tabcontent" style="overflow:auto">'''.format(
viewer_name)
if variable_specific:
tab_content += '''\n<h5>Transformed Selected Variables Size: {} x {}</h5>'''.format(
len(transformed_matrix_df.index),
len(transformed_matrix_df.columns))
else:
tab_content += '''\n<h5>Transformed Matrix Size: {} x {}</h5>'''.format(
len(transformed_matrix_df.index),
len(transformed_matrix_df.columns))
tab_content += '''\n<h5>Row Aggregating Statistics</h5>'''
html = '''\n<pre class="tab">''' + str(row_data_summary).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '''\n<br>'''
tab_content += '''\n<hr style="height:2px;border-width:0;color:gray;background-color:gray">'''
tab_content += '''\n<br>'''
tab_content += '''\n<h5>Column Aggregating Statistics</h5>'''
html = '''\n<pre class="tab">''' + str(col_data_summary).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '\n</div>\n'
tab_def_content += '\n</div>\n'
return tab_def_content + tab_content
def _generate_linear_plot(self, data_df, output_directory, row_name='abundance',
top_percent=100):
linear_plot_path = 'linear_plot.html'
sum_order = data_df.sum(axis=1).sort_values(ascending=False).index
data_df = data_df.reindex(sum_order)
top_index = data_df.index[:int(data_df.index.size * top_percent / 100)]
data_df = data_df.loc[top_index]
links = data_df.stack().reset_index()
col_names = links.columns
links.rename(columns={col_names[0]: row_name,
col_names[1]: 'samples',
col_names[2]: 'value'},
inplace=True)
fig = px.line(links, x=row_name, y='value', color='samples')
plot(fig, filename=os.path.join(output_directory, linear_plot_path))
return linear_plot_path
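    # usage sketch: _generate_linear_plot(df, out_dir, top_percent=25) keeps only
    # the top quarter of rows (ranked by total abundance) before plotting.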
def _create_chem_abun_heatmap(self, output_directory, data_groups):
data_df = pd.concat(data_groups.values())
col_ordered_label = self._compute_cluster_label_order(data_df.T.values.tolist(),
data_df.T.index.tolist())
data_df = data_df.reindex(columns=col_ordered_label)
data_label_groups_pos = dict()
for group_name, data_group_df in data_groups.items():
if pd.isna(group_name[1]):
label_name = group_name[0]
else:
label_name = '{} ({})'.format(group_name[0], group_name[1])
data_label_groups_pos[label_name] = [
data_df.index.to_list().index(data_id) for data_id in data_group_df.index]
heatmap_file_name = 'chem_abun_heatmap_{}.html'.format(str(uuid.uuid4()))
heatmap_path = os.path.join(output_directory, heatmap_file_name)
colors = px.colors.sequential.OrRd
colorscale = [[0, colors[1]], # 0
[1./10000, colors[2]], # 10
[1./1000, colors[3]], # 100
[1./100, colors[4]], # 1000
[1./10, colors[5]], # 10000
[1., colors[6]]]
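        # (note) the breakpoints above give a roughly logarithmic colour ramp:
        # equal colour steps at 0, 1e-4, 1e-3, 1e-2, 1e-1 and 1.0 of the z-range,
        # so abundances spanning several orders of magnitude stay distinguishable.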
layout = go.Layout(xaxis={'type': 'category'},
yaxis={'type': 'category'})
fig = go.Figure(data=go.Heatmap(
z=data_df.values,
x=data_df.columns,
y=data_df.index,
hoverongaps=False,
coloraxis='coloraxis'), layout=layout)
width = max(15 * data_df.columns.size, 1400)
height = max(10 * data_df.index.size, 1000)
fig.update_layout(coloraxis=dict(colorscale=colorscale),
plot_bgcolor='rgba(0,0,0,0)',
autosize=True,
width=width,
height=height,
xaxis=dict(tickangle=45,
automargin=True,
tickfont=dict(color='black', size=8)),
yaxis=dict(automargin=True,
tickfont=dict(color='black', size=8)))
colors = px.colors.qualitative.Bold
chemical_types = ['aggregate', 'exometabolite', 'specific']
text_height = 0
col_size = width / data_df.columns.size
label_pos = 70 / col_size
if len(data_label_groups_pos) > 1:
for i, label_name in enumerate(data_label_groups_pos):
data_label_idx = data_label_groups_pos[label_name]
chemical_type = label_name.split(' ')[0]
if i == 0:
fig.update_layout(yaxis=dict(range=[0, data_df.index.size-1],
tickvals=data_label_idx,
automargin=True,
tickfont=dict(
color=colors[chemical_types.index(chemical_type)],
size=8)))
text_height += len(data_label_idx) - 1
fig.add_annotation(x=label_pos, y=0.5,
ax=label_pos, ay=text_height,
text=label_name,
showarrow=True,
xref="x", yref="y",
axref="x", ayref="y",
arrowside='start',
# arrowwidth=1.5,
font=dict(color=colors[chemical_types.index(chemical_type)],
size=8))
else:
fig.add_trace(dict(yaxis='y{}'.format(i + 1)))
fig.update_layout({'yaxis{}'.format(i + 1): dict(
range=[0, data_df.index.size-1],
tickvals=data_label_idx,
ticktext=[data_df.index[i] for i in data_label_idx],
tickfont=dict(color=colors[chemical_types.index(chemical_type)], size=8),
automargin=True,
overlaying='y')})
text_height += len(data_label_idx)
fig.add_annotation(x=label_pos, y=text_height - len(data_label_idx) + 1,
ax=label_pos, ay=text_height,
text=label_name,
showarrow=True,
xref="x", yref="y",
axref="x", ayref="y",
arrowside='start',
# arrowwidth=1.5,
font=dict(color=colors[chemical_types.index(chemical_type)],
size=8))
plot(fig, filename=heatmap_path)
return heatmap_file_name
def _generate_chem_visualization_content(self, output_directory, data_groups):
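"""
Build the tab definitions and tab contents (per-type matrix statistics plus the
combined heatmap) for the chemical abundance report and return them as one HTML string.
"""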
tab_def_content = ''
tab_content = ''
viewer_name = 'data_summary'
tab_def_content += '''\n<div class="tab">\n'''
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
tab_def_content += '''>Matrix Statistics</button>\n'''
tab_content += '''\n<div id="{}" class="tabcontent" style="overflow:auto">'''.format(
viewer_name)
chemical_types = list(data_groups.keys())
chemical_types = ['{} ({})'.format(item[0], item[1]) for item in chemical_types]
type_text = 'Chemical Type' if len(chemical_types) == 1 else 'Chemical Types'
tab_content += '''\n<h5>{}: {}</h5>'''.format(type_text,
', '.join(chemical_types))
for chemical_type, data_df in data_groups.items():
chemical_type = '{} ({})'.format(chemical_type[0], chemical_type[1])
tab_content += '''\n<br>'''
tab_content += '''\n<hr style="height:2px;border-width:0;color:gray;background-color:gray">'''
tab_content += '''\n<br>'''
row_data_summary = data_df.T.describe().round(2).to_string()
col_data_summary = data_df.describe().round(2).to_string()
tab_content += '''\n<h5>{} Chemical Matrix Size: {} x {}</h5>'''.format(
chemical_type[0].upper() + chemical_type[1:],
len(data_df.index),
len(data_df.columns))
tab_content += '''\n<h5>{} Row Aggregating Statistics</h5>'''.format(
chemical_type[0].upper() + chemical_type[1:])
html = '''\n<pre class="tab">''' + \
str(row_data_summary).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '''\n<h5>{} Column Aggregating Statistics</h5>'''.format(
chemical_type[0].upper() + chemical_type[1:])
html = '''\n<pre class="tab">''' + \
str(col_data_summary).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '\n</div>\n'
heatmap_index_page = self._create_chem_abun_heatmap(output_directory, data_groups)
viewer_name = 'MatrixHeatmapViewer'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Matrix Heatmap</button>\n'''
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
tab_content += '\n<iframe height="1300px" width="100%" '
tab_content += 'src="{}" '.format(heatmap_index_page)
tab_content += 'style="border:none;"></iframe>'
tab_content += '\n</div>\n'
tab_def_content += '\n</div>\n'
return tab_def_content + tab_content
def _generate_visualization_content(self, output_directory, heatmap_dir, data_df,
top_heatmap_dir, top_percent):
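"""
Build the report tabs: matrix statistics, an optional top-percent heatmap, and the
full matrix heatmap; returns the combined HTML string.
"""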
row_data_summary = data_df.T.describe().round(2).to_string()
col_data_summary = data_df.describe().round(2).to_string()
tab_def_content = ''
tab_content = ''
viewer_name = 'data_summary'
tab_def_content += '''\n<div class="tab">\n'''
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
tab_def_content += '''>Matrix Statistics</button>\n'''
tab_content += '''\n<div id="{}" class="tabcontent" style="overflow:auto">'''.format(
viewer_name)
tab_content += '''\n<h5>Matrix Size: {} x {}</h5>'''.format(len(data_df.index),
len(data_df.columns))
tab_content += '''\n<h5>Row Aggregating Statistics</h5>'''
html = '''\n<pre class="tab">''' + str(row_data_summary).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '''\n<br>'''
tab_content += '''\n<hr style="height:2px;border-width:0;color:gray;background-color:gray">'''
tab_content += '''\n<br>'''
tab_content += '''\n<h5>Column Aggregating Statistics</h5>'''
html = '''\n<pre class="tab">''' + str(col_data_summary).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '\n</div>\n'
if top_heatmap_dir:
viewer_name = 'TopHeatmapViewer'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Top {} Percent Heatmap</button>\n'''.format(top_percent)
heatmap_report_files = os.listdir(top_heatmap_dir)
heatmap_index_page = None
for heatmap_report_file in heatmap_report_files:
if heatmap_report_file.endswith('.html'):
heatmap_index_page = heatmap_report_file
shutil.copy2(os.path.join(top_heatmap_dir, heatmap_report_file),
output_directory)
if heatmap_index_page:
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
msg = 'Top {} percent of matrix sorted by sum of abundance values.'.format(
top_percent)
tab_content += '''<p style="color:red;" >{}</p>'''.format(msg)
tab_content += '\n<iframe height="1300px" width="100%" '
tab_content += 'src="{}" '.format(heatmap_index_page)
tab_content += 'style="border:none;"></iframe>'
tab_content += '\n</div>\n'
else:
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
tab_content += '''\n<p style="color:red;" >'''
tab_content += '''Heatmap is too large to be displayed.</p>\n'''
tab_content += '\n</div>\n'
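# NOTE: the linear plot tabs below are currently disabled by the 'if False' guard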
if False and len(data_df.columns) <= 200:
if top_heatmap_dir:
viewer_name = 'MatrixLinearPlotViewer'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Top {} Percent Linear Plot</button>\n'''.format(top_percent)
linear_plot_page = self._generate_linear_plot(data_df, output_directory,
top_percent=top_percent)
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
msg = 'Top {} percent of matrix sorted by sum of abundance values.'.format(
top_percent)
tab_content += '''<p style="color:red;" >{}</p>'''.format(msg)
tab_content += '\n<iframe height="1300px" width="100%" '
tab_content += 'src="{}" '.format(linear_plot_page)
tab_content += 'style="border:none;"></iframe>'
tab_content += '\n</div>\n'
else:
viewer_name = 'MatrixLinearPlotViewer'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Matrix Linear Plot</button>\n'''
linear_plot_page = self._generate_linear_plot(data_df, output_directory)
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
tab_content += '\n<iframe height="1300px" width="100%" '
tab_content += 'src="{}" '.format(linear_plot_page)
tab_content += 'style="border:none;"></iframe>'
tab_content += '\n</div>\n'
viewer_name = 'MatrixHeatmapViewer'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Matrix Heatmap</button>\n'''
heatmap_report_files = os.listdir(heatmap_dir)
heatmap_index_page = None
for heatmap_report_file in heatmap_report_files:
if heatmap_report_file.endswith('.html'):
heatmap_index_page = heatmap_report_file
shutil.copy2(os.path.join(heatmap_dir, heatmap_report_file),
output_directory)
if heatmap_index_page:
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
tab_content += '\n<iframe height="1300px" width="100%" '
tab_content += 'src="{}" '.format(heatmap_index_page)
tab_content += 'style="border:none;"></iframe>'
tab_content += '\n</div>\n'
else:
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
tab_content += '''\n<p style="color:red;" >'''
tab_content += '''Heatmap is too large to be displayed.</p>\n'''
tab_content += '\n</div>\n'
tab_def_content += '\n</div>\n'
return tab_def_content + tab_content
def _generate_mantel_test_html_report(self, pwmantel_res):
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
logging.info('Start generating html report in {}'.format(output_directory))
html_report = list()
self._mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'mantel_test_viewer_report.html')
visualization_content = self._generate_mantel_test_visualization_content(pwmantel_res)
table_style_content = '''
table {
font-family: arial, sans-serif;
border-collapse: collapse;
width: 100%;
}
td, th {
border: 1px solid #dddddd;
text-align: left;
padding: 8px;
}
tr:nth-child(even) {
background-color: #dddddd;
}
</style>'''
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'matrix_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
report_template = report_template.replace('</style>',
table_style_content)
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
'description': 'HTML summary report for Mantel Test App'
})
return html_report
def _generate_simper_html_report(self, simper_ret, simper_sum, species_stats, grouping_names):
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
logging.info('Start generating html report in {}'.format(output_directory))
html_report = list()
self._mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'simper_viewer_report.html')
visualization_content = self._generate_simper_visualization_content(simper_ret,
simper_sum,
species_stats,
grouping_names,
output_directory)
table_style_content = '''
table {
font-family: arial, sans-serif;
border-collapse: collapse;
width: 66%;
}
td, th {
border: 1px solid #dddddd;
text-align: left;
padding: 8px;
}
tr:nth-child(even) {
background-color: #dddddd;
}
</style>'''
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'matrix_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
report_template = report_template.replace('</style>',
table_style_content)
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
'description': 'HTML summary report for Simper App'
})
return html_report
def _generate_variable_stats_html_report(self, anosim_res, permanova_res, permdisp_res):
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
logging.info('Start generating html report in {}'.format(output_directory))
html_report = list()
self._mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'variable_stats_viewer_report.html')
visualization_content = self._generate_variable_stats_visualization_content(anosim_res,
permanova_res,
permdisp_res)
table_style_content = '''
table {
font-family: arial, sans-serif;
border-collapse: collapse;
width: 66%;
}
td, th {
border: 1px solid #dddddd;
text-align: left;
padding: 8px;
}
tr:nth-child(even) {
background-color: #dddddd;
}
</style>'''
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'matrix_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
report_template = report_template.replace('</style>',
table_style_content)
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
'description': 'HTML summary report for Variable Stats App'
})
return html_report
def _generate_rarefy_html_report(self, rarefied_matrix_dir,
rarecurve_image, obs_vs_rare_image, random_rare_df):
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
logging.info('Start generating html report in {}'.format(output_directory))
html_report = list()
self._mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'rarefy_matrix_viewer_report.html')
visualization_content = self._generate_rarefy_visualization_content(
output_directory,
rarefied_matrix_dir,
rarecurve_image,
obs_vs_rare_image,
random_rare_df)
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'matrix_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
                            'description': 'HTML summary report for Rarefy Matrix App'
})
return html_report
def _generate_transform_html_report(self, operations, heatmap_html_dir_l,
transformed_matrix_df, variable_specific):
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
logging.info('Start generating html report in {}'.format(output_directory))
html_report = list()
self._mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'transform_matrix_viewer_report.html')
visualization_content = self._generate_trans_visualization_content(
output_directory,
operations,
heatmap_html_dir_l,
transformed_matrix_df,
variable_specific)
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'matrix_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
'description': 'HTML summary report for Transform Matrix App'
})
return html_report
def _compute_cluster_label_order(self, values, labels):
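"""
Order labels via hierarchical clustering (Ward linkage over pairwise distances)
so that similar entries end up adjacent in the rendered heatmap.
"""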
# values = [[1, 0, 21, 50, 1], [20, 0, 60, 80, 30], [30, 60, 1, -10, 20]]
# labels = ['model_1', 'model_2', 'model_3']
if len(labels) == 1:
return labels
dist_matrix = pdist(values)
linkage_matrix = linkage(dist_matrix, 'ward')
dn = dendrogram(linkage_matrix, labels=labels, distance_sort='ascending')
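# 'ivl' holds the leaf labels in the order they appear along the dendrogram axis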
ordered_label = dn['ivl']
return ordered_label
def _generate_chem_abund_heatmap_html_report(self, data, metadata_df):
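"""
Group chemical abundance values by (chemical_type, units), cluster rows within each
group, and generate the heatmap report page; returns the html_report link list.
"""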
logging.info('Start generating chemical abundance heatmap report page')
data_df = pd.DataFrame(data['values'], index=data['row_ids'], columns=data['col_ids'])
result_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self._mkdir_p(result_directory)
group_by = ['chemical_type', 'units']
metadata_groups = metadata_df.groupby(by=group_by).groups
data_groups = dict()
for group_name, ids in metadata_groups.items():
chem_type_data = data_df.loc[ids]
idx_ordered_label = self._compute_cluster_label_order(chem_type_data.values.tolist(),
chem_type_data.index.tolist())
data_groups[group_name] = chem_type_data.reindex(index=idx_ordered_label)
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
logging.info('Start generating html report in {}'.format(output_directory))
html_report = list()
self._mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'matrix_viewer_report.html')
visualization_content = self._generate_chem_visualization_content(output_directory,
data_groups)
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'matrix_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
'description': 'HTML summary report for Import Matrix App'
})
return html_report
def _generate_heatmap_html_report(self, data):
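"""
Dump the matrix to TSV, build full (and, for large matrices, top-percent) heatmap HTML
via the report utility, and assemble the matrix viewer report page.
"""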
logging.info('Start generating heatmap report page')
data_df = pd.DataFrame(data['values'], index=data['row_ids'], columns=data['col_ids'])
result_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self._mkdir_p(result_directory)
tsv_file_path = os.path.join(result_directory, 'heatmap_data_{}.tsv'.format(
str(uuid.uuid4())))
data_df.to_csv(tsv_file_path)
heatmap_dir = self.report_util.build_heatmap_html({
'tsv_file_path': tsv_file_path,
'cluster_data': True})['html_dir']
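# for matrices with more than 500 rows, also build a heatmap restricted to the rows with
# the largest abundance sums so that a readable subset can be displayed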
top_heatmap_dir = None
top_percent = 100
if len(data_df.index) > 500:
display_count = 200  # rough count of rows to display
top_percent = min(int(display_count / len(data_df.index) * 100), 100)
top_percent = max(top_percent, 1)
top_heatmap_dir = self.report_util.build_heatmap_html({
'tsv_file_path': tsv_file_path,
'sort_by_sum': True,
'top_percent': top_percent})['html_dir']
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
logging.info('Start generating html report in {}'.format(output_directory))
html_report = list()
self._mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'matrix_viewer_report.html')
visualization_content = self._generate_visualization_content(output_directory,
heatmap_dir,
data_df,
top_heatmap_dir,
top_percent)
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'matrix_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
'description': 'HTML summary report for Import Matrix App'
})
return html_report
def _generate_rarefy_report(self, new_matrix_obj_ref, workspace_id,
random_rare_df, rarecurve_image, obs_vs_rare_image,
warnings):
objects_created = [{'ref': new_matrix_obj_ref, 'description': 'Randomly Rarefied Matrix'}]
data_tsv_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self._mkdir_p(data_tsv_directory)
logging.info('Start generating matrix tsv files in {}'.format(data_tsv_directory))
rarefied_matrix_tsv_path = os.path.join(data_tsv_directory,
'rarefied_matrix_{}.tsv'.format(
str(uuid.uuid4())))
random_rare_df.to_csv(rarefied_matrix_tsv_path)
rarefied_matrix_dir = self.report_util.build_heatmap_html({
'tsv_file_path': rarefied_matrix_tsv_path,
'cluster_data': True})['html_dir']
output_html_files = self._generate_rarefy_html_report(rarefied_matrix_dir,
rarecurve_image,
obs_vs_rare_image,
random_rare_df)
report_params = {'message': '',
'objects_created': objects_created,
'workspace_id': workspace_id,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 1400,
'report_object_name': 'rarefy_matrix_' + str(uuid.uuid4()),
'warnings': warnings}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def _generate_transform_report(self, new_matrix_obj_ref, workspace_id,
operations, df_results, variable_specific=False):
objects_created = [{'ref': new_matrix_obj_ref, 'description': 'Transformed Matrix'}]
data_tsv_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self._mkdir_p(data_tsv_directory)
heatmap_html_dir_l = []
for i, (op, df) in enumerate(zip(operations, df_results)):
tsv_path = os.path.join(data_tsv_directory, 'op%d_%s.tsv' % (i, op))
df.to_csv(tsv_path)
heatmap_html_dir = self.report_util.build_heatmap_html({
'tsv_file_path': tsv_path,
'cluster_data': True
})['html_dir']
heatmap_html_dir_l.append(heatmap_html_dir)
output_html_files = self._generate_transform_html_report(operations, heatmap_html_dir_l,
df_results[-1],
variable_specific)
report_params = {'message': '',
'objects_created': objects_created,
'workspace_id': workspace_id,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 1400,
'report_object_name': 'transform_matrix_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def _generate_mantel_test_report(self, workspace_id, pwmantel_res):
output_html_files = self._generate_mantel_test_html_report(pwmantel_res)
report_params = {'message': '',
'workspace_id': workspace_id,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 300,
'report_object_name': 'mantel_test_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def _generate_simper_report(self, workspace_id, simper_ret, simper_sum,
species_stats, grouping_names):
output_html_files = self._generate_simper_html_report(simper_ret, simper_sum,
species_stats, grouping_names)
report_params = {'message': '',
'workspace_id': workspace_id,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 450,
'report_object_name': 'simper_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def _generate_variable_stats_report(self, workspace_id,
anosim_res, permanova_res, permdisp_res):
output_html_files = self._generate_variable_stats_html_report(anosim_res,
permanova_res,
permdisp_res)
report_params = {'message': '',
'workspace_id': workspace_id,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 450,
'report_object_name': 'variable_stats_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def _generate_report(self, matrix_obj_ref, workspace_name, new_row_attr_ref=None,
new_col_attr_ref=None, data=None, metadata_df=None):
"""
_generate_report: generate summary report
"""
objects_created = [{'ref': matrix_obj_ref, 'description': 'Imported Matrix'}]
if new_row_attr_ref:
objects_created.append({'ref': new_row_attr_ref,
'description': 'Imported Row Attribute Mapping'})
if new_col_attr_ref:
objects_created.append({'ref': new_col_attr_ref,
'description': 'Imported Column Attribute Mapping'})
if data:
if metadata_df is not None:
output_html_files = self._generate_chem_abund_heatmap_html_report(data,
metadata_df)
else:
output_html_files = self._generate_heatmap_html_report(data)
report_params = {'message': '',
'objects_created': objects_created,
'workspace_name': workspace_name,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 1400,
'report_object_name': 'import_matrix_from_excel_' + str(uuid.uuid4())}
else:
report_params = {'message': '',
'objects_created': objects_created,
'workspace_name': workspace_name,
'report_object_name': 'import_matrix_from_excel_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
@staticmethod
def _process_mapping_sheet(file_path, sheet_name):
"""
_process_mapping_sheet: process mapping sheet
"""
try:
df = pd.read_excel(file_path, sheet_name=sheet_name, dtype='str')
except XLRDError:
return dict()
else:
mapping = {value[0]: value[1] for value in df.values.tolist()}
return mapping
def _process_attribute_mapping_sheet(self, file_path, sheet_name, matrix_name, workspace_id):
"""
_process_attribute_mapping_sheet: process attribute_mapping sheet
"""
try:
df = pd.read_excel(file_path, sheet_name=sheet_name, index_col=0)
except XLRDError:
return ''
else:
obj_name = f'{matrix_name}_{sheet_name}'
result_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self._mkdir_p(result_directory)
file_path = os.path.join(result_directory, '{}.xlsx'.format(obj_name))
df.to_excel(file_path)
import_attribute_mapping_params = {
'output_obj_name': obj_name,
'output_ws_id': workspace_id,
'input_file_path': file_path
}
ref = self.attr_util.file_to_attribute_mapping(import_attribute_mapping_params)
return ref.get('attribute_mapping_ref')
@staticmethod
def _file_to_df(file_path):
logging.info('start parsing file content to data frame')
try:
df = pd.read_excel(file_path, sheet_name='data', index_col=0)
except XLRDError:
try:
df = pd.read_excel(file_path, index_col=0)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/13 16:45
# @Author : DaiPuWei
# E-Mail : <EMAIL>
# blog : https://blog.csdn.net/qq_30091945
# @Site : Lab 506, North Teaching Building 25, Civil Aviation University of China
# @File : BostonHousing.py
# @Software: PyCharm
from LinearRegression.LinearRegression import LinearRegression
from LocalWeightedLinearRegression.LocalWeightedLinearRegression \
import LocalWeightedLinearRegression
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def Merge(data,col):
"""
这是生成DataFrame数据的函数
:param data:输入数据
:param col:列名称数组
"""
Data = np.array(data).T
return pd.DataFrame(Data, columns=col)
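# Usage sketch (illustrative values): Merge([[1, 2, 3], [4, 5, 6]], ['x', 'y']) returns a
# DataFrame whose column 'x' holds [1, 2, 3] and column 'y' holds [4, 5, 6]; each input
# row becomes one named column.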
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
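# these levels/labels encode the tuples (foo, one), (foo, two), (bar, one),
# (baz, two), (qux, one), (qux, two)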
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
        # 0.7.3 -> 0.8.0 format change
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
        # GH 19086 : int is cast to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
        # TODO: assert on the returned locations instead of only smoke-testing
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
        # the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
        # non-MultiIndex input (an array of tuples) is accepted, no exception
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
        tm.assert_index_equal(result, expected)
        assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
        # FIXME: data types change to float because
        # of intermediate NaN insertion;
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
    def test_take_invalid_kwargs(self):
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
@pytest.mark.parametrize('other',
[Index(['three', 'one', 'two']),
Index(['one']),
Index(['one', 'three'])])
def test_join_level(self, other, join_type):
join_index, lidx, ridx = other.join(self.index, how=join_type,
level='second',
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=join_type)
assert join_index.levels[0].equals(self.index.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
self.index.join(other, how=join_type, level='second',
return_indexers=True)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(self):
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
assert isinstance(result, MultiIndex)
tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
def test_join_self(self, join_type):
res = self.index
joined = res.join(res, how=join_type)
assert res is joined
def test_join_multi(self):
# GH 10665
midx = pd.MultiIndex.from_product(
[np.arange(4), np.arange(4)], names=['a', 'b'])
idx = pd.Index([1, 2, 5], name='b')
# inner
jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
exp_idx = pd.MultiIndex.from_product(
[np.arange(4), [1, 2]], names=['a', 'b'])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,
1, -1], dtype=np.intp)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
assert isinstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
assert isinstance(result, MultiIndex)
assert indexer is None
self.check_level_names(result, self.index.names)
def test_reindex_level(self):
idx = Index(['one'])
target, indexer = self.index.reindex(idx, level='second')
target2, indexer2 = idx.reindex(self.index, level='second')
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
tm.assert_raises_regex(TypeError, "Fill method not supported",
self.index.reindex, self.index,
method='pad', level='second')
tm.assert_raises_regex(TypeError, "Fill method not supported",
idx.reindex, idx, method='bfill',
level='first')
def test_duplicates(self):
assert not self.index.has_duplicates
assert self.index.append(self.index).has_duplicates
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
assert index.has_duplicates
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), | u('out') | pandas.compat.u |
#!/usr/bin/env python
r"""Test :py:class:`~solarwindpy.core.vector.Vector` and :py:class:`~solarwindpy.core.tensor.Tensor`.
"""
import pdb
# import re as re
import numpy as np
import pandas as pd
import unittest
import sys
import pandas.testing as pdt
from unittest import TestCase
from abc import ABC, abstractproperty
from scipy import constants
# import test_base as base
from solarwindpy.tests import test_base as base
from solarwindpy import vector
from solarwindpy import tensor
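# Raise on chained assignment so accidental writes to intermediate copies fail
# loudly during the tests instead of only emitting SettingWithCopyWarning.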
pd.set_option("mode.chained_assignment", "raise")
class QuantityTestBase(ABC):
def test_data(self):
data = self.data
if isinstance(data, pd.Series):
pdt.assert_series_equal(data, self.object_testing.data)
else:
pdt.assert_frame_equal(data, self.object_testing.data)
def test_eq(self):
print_inline_debug = False
object_testing = self.object_testing
# ID should be equal.
self.assertEqual(object_testing, object_testing)
# Data and type should be equal.
new_object = object_testing.__class__(object_testing.data)
if print_inline_debug:
print(
"<Test>",
"<object_testing>",
type(object_testing),
object_testing,
object_testing.data,
"<new_object>",
type(new_object),
new_object,
new_object.data,
"",
sep="\n",
)
self.assertEqual(object_testing, new_object)
def test_neq(self):
object_testing = self.object_testing
# Data isn't equal
self.assertNotEqual(
object_testing, object_testing.__class__(object_testing.data * 4)
)
# Type isn't equal
for other in (
[],
tuple(),
np.array([]),
pd.Series(dtype=np.float64),
pd.DataFrame(dtype=np.float64),
):
self.assertNotEqual(object_testing, other)
def test_empty_data_catch(self):
with self.assertRaisesRegex(
ValueError, "You can't set an object with empty data."
):
self.object_testing.__class__(pd.DataFrame())
#####
# Vectors
#####
class VectorTestBase(QuantityTestBase):
def test_components(self):
# print("test_components")
# print(self.data.iloc[:, :7], flush=True)
v = self.data
# print(v, file=sys.stdout)
pdt.assert_series_equal(v.x, self.object_testing.x)
pdt.assert_series_equal(v.y, self.object_testing.y)
pdt.assert_series_equal(v.z, self.object_testing.z)
def test_mag(self):
# print("test_mag")
# print(self.data.iloc[:, :7], flush=True)
x = self.data.x
y = self.data.y
z = self.data.z
# print(v, file=sys.stdout)
mag = np.sqrt(x.pow(2) + y.pow(2) + z.pow(2))
# mag = self.data.loc[:, ["x", "y", "z"]].pow(2).sum(axis=1).pipe(np.sqrt)
mag.name = "mag"
# print("", self.data, mag, self.object_testing.mag, sep="\n")
pdt.assert_series_equal(mag, self.object_testing.mag)
pdt.assert_series_equal(mag, self.object_testing.magnitude)
pdt.assert_series_equal(self.object_testing.mag, self.object_testing.magnitude)
def test_rho(self):
# print("test_rho")
x = self.data.x
y = self.data.y
rho = np.sqrt(x.pow(2) + y.pow(2))
rho.name = "rho"
pdt.assert_series_equal(rho, self.object_testing.rho)
def test_colat(self):
# print("test_colat")
x = self.data.x
y = self.data.y
z = self.data.z
colat = np.arctan2(z, np.sqrt(x.pow(2) + y.pow(2)))
colat = np.rad2deg(colat)
colat.name = "colat"
pdt.assert_series_equal(colat, self.object_testing.colat)
def test_longitude(self):
# print("test_longitude")
x = self.data.x
y = self.data.y
lon = np.arctan2(y, x)
lon = np.rad2deg(lon)
lon.name = "longitude"
pdt.assert_series_equal(lon, self.object_testing.lon)
pdt.assert_series_equal(lon, self.object_testing.longitude)
pdt.assert_series_equal(self.object_testing.lon, self.object_testing.longitude)
def test_r(self):
# print("test_r")
x = self.data.x
y = self.data.y
z = self.data.z
r = np.sqrt(x.pow(2) + y.pow(2) + z.pow(2))
r.name = "r"
pdt.assert_series_equal(r, self.object_testing.r)
pdt.assert_series_equal(r, self.object_testing.mag, check_names=False)
pdt.assert_series_equal(
self.object_testing.r, self.object_testing.mag, check_names=False
)
def test_cartesian(self):
v = self.data.loc[:, ["x", "y", "z"]]
pdt.assert_frame_equal(v, self.object_testing.cartesian)
def test_unit_vector(self):
v = self.data.loc[:, ["x", "y", "z"]]
mag = v.pow(2).sum(axis=1).pipe(np.sqrt)
uv = v.divide(mag, axis=0)
uv.name = "uv"
uv = vector.Vector(uv)
pdt.assert_frame_equal(uv.data, self.object_testing.unit_vector.data)
pdt.assert_frame_equal(uv.data, self.object_testing.uv.data)
pdt.assert_frame_equal(
self.object_testing.uv.data, self.object_testing.unit_vector.data
)
self.assertEqual(uv, self.object_testing.unit_vector)
self.assertEqual(uv, self.object_testing.uv)
self.assertEqual(self.object_testing.unit_vector, self.object_testing.uv)
def test_project(self):
b = (
base.TestData()
.plasma_data.xs("b", axis=1, level="M")
.xs("", axis=1, level="S")
.loc[:, ["x", "y", "z"]]
)
# b.setUpClass()
# b = (
# b.data.b.loc[:, ["x", "y", "z"]]
# .xs("", axis=1, level="S")
# .xs("", axis=1, level="N")
# )
bmag = b.pow(2).sum(axis=1).pipe(np.sqrt)
buv = b.divide(bmag, axis=0)
v = self.data.loc[:, ["x", "y", "z"]]
vmag = v.pow(2).sum(axis=1).pipe(np.sqrt)
# vuv = v.divide(vmag, axis=0)
par = v.multiply(buv, axis=1).sum(axis=1)
per = (
v.subtract(buv.multiply(par, axis=0), axis=1)
.pow(2)
.sum(axis=1)
.pipe(np.sqrt)
)
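        # i.e. par = v · b_hat (signed component of v along b) and
        # per = |v - (v · b_hat) * b_hat| (magnitude of v perpendicular to b)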
projected = pd.concat([par, per], axis=1, keys=["par", "per"], sort=True)
# print("",
# "<Test>",
# "<buv>", type(buv), buv,
# "<v>", type(v), v,
# "<vmag>", type(vmag), vmag,
# "<vuv>", type(vuv), vuv,
# "<projected>", type(projected), projected,
# "",
# sep="\n")
b = vector.Vector(b)
pdt.assert_frame_equal(projected, self.object_testing.project(b))
# Projecting a thing onto itself should return 1 for parallel
# and 0 for perp.
per = pd.Series(0.0, index=per.index)
projected = | pd.concat([vmag, per], axis=1, keys=["par", "per"], sort=True) | pandas.concat |
# Author: <NAME>
import numpy as np
import pandas as pd
import geohash
from . import datasets
# helper functions
def decode_geohash(df):
print('Decoding geohash...')
df['lon'], df['lat'] = zip(*[(latlon[1], latlon[0]) for latlon
in df['geohash6'].map(geohash.decode)])
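    # `geohash.decode` yields (lat, lon); the swap above stores longitude and
    # latitude under their matching column names.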
return df
def cap(old):
"""Caps predicted values to [0, 1]"""
new = [min(1, y) for y in old]
new = [max(0, y) for y in new]
return np.array(new)
# core functions
def expand_timestep(df, test_data):
"""Expand data to include full timesteps for all TAZs, filled with zeros.
Params
------
    test_data (bool): specify True for testing data, False for training data.
                      If True, additional rows from t+1 to t+6 per TAZ
                      will be created to perform the forecast later on
                      (the extra t+6 slot supports the optional shift in
                      `get_forecast_output`; only t+1 to t+5 are reported).
"""
# extract coordinates
df = decode_geohash(df)
# expand all TAZs by full timesteps
min_ts = int(df['timestep'].min())
max_ts = int(df['timestep'].max())
if test_data:
        print('Expanding testing data and filling NaNs with '
              '0 demands for all timesteps per TAZ; '
              'also generating T+1 to T+6 slots for forecasting...')
timesteps = list(range(min_ts, max_ts + 7)) # predicting T+1 to T+6
else:
        print('Expanding training data and filling NaNs with '
              '0 demands for all timesteps per TAZ...')
timesteps = list(range(min_ts, max_ts + 1))
print('Might take a moment depending on machines...')
# create full df skeleton
full_df = pd.concat([pd.DataFrame({'geohash6': taz,
'timestep': timesteps})
for taz in df['geohash6'].unique()],
ignore_index=True,
sort=False)
# merge back fixed features: TAZ-based, timestep-based
taz_info = ['geohash6', 'label_weekly_raw', 'label_weekly',
'label_daily', 'label_quarterly', 'active_rate', 'lon', 'lat']
ts_info = ['day', 'timestep', 'weekly', 'quarter', 'hour', 'dow']
demand_info = ['geohash6', 'timestep', 'demand']
full_df = full_df.merge(df[taz_info].drop_duplicates(),
how='left', on=['geohash6'])
full_df = full_df.merge(df[ts_info].drop_duplicates(),
how='left', on=['timestep'])
# NOTE: there are 9 missing timesteps:
# 1671, 1672, 1673, 1678, 1679, 1680, 1681, 1682, 1683
# also, the new t+1 to t+5 slots in test data will miss out ts_info
# a = set(df['timestep'].unique())
# b = set(timesteps)
# print(a.difference(b))
# print(b.difference(a))
# fix missing timestep-based information:
missing = full_df[full_df['day'].isna()]
patch = datasets.process_timestamp(missing, fix=True)
full_df.fillna(patch, inplace=True)
# merge row-dependent feature: demand
full_df = full_df.merge(df[demand_info].drop_duplicates(),
how='left', on=['geohash6', 'timestep'])
full_df['demand'].fillna(0, inplace=True)
if test_data:
full_df.loc[full_df['timestep'] > max_ts, 'demand'] = -1
print('Done.')
print('Missing values:')
print(full_df.isna().sum())
return full_df
def get_history(df, periods):
"""
Append historical demands of TAZs as a new feature
from `periods` of timesteps (15-min) before.
"""
# create diff_zone indicator (curr TAZ != prev TAZ (up to periods) row-wise)
shft = pd.DataFrame.shift(df[['geohash6', 'demand']], periods=periods)
diff_zone = df['geohash6'] != shft['geohash6']
shft.loc[diff_zone, 'demand'] = -1 # set -1 if different TAZ
df['demand_t-%s' % periods] = shft['demand']
df['demand_t-%s' % periods].fillna(-1, inplace=True) # set NaNs to -1
return df
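# With 15-minute timesteps, the lags used by the trend features in
# `generate_features` below correspond to:
#   1, 2, 5        -> 15, 30 and 75 minutes earlier
#   96, 97, 101    -> one day earlier (96 * 15 min = 24 h) plus its short lags
#   672, 673, 677  -> one week earlier (672 * 15 min = 7 days) plus its short lags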
def generate_features(df, history):
""""""
if history is not None:
print('Retrieving historical demands...')
[get_history(df, h) for h in history]
print('Generating features...')
# NOTE: be aware of timezones (see explore_function segmentation.ipynb)
# df['am_peak'] = ((df['hour'] >= 22) | (df['hour'] <= 2)).astype(int)
# df['midnight'] = ((df['hour'] >= 17) & (df['hour'] < 22)).astype(int)
df['weekend'] = (df['dow'] > 4).astype(int)
df['st_trend'] = df['demand_t-1'] - df['demand_t-2']
df['mt_trend'] = df['demand_t-1'] - df['demand_t-5']
df['st_trend_1d'] = df['demand_t-96'] - df['demand_t-97']
df['mt_trend_1d'] = df['demand_t-96'] - df['demand_t-101']
df['st_trend_1w'] = df['demand_t-672'] - df['demand_t-673']
df['mt_trend_1w'] = df['demand_t-672'] - df['demand_t-677']
df['lt_trend_1d'] = df['demand_t-96'] - df['demand_t-672']
print('Done.')
return df
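# A minimal usage sketch for the training path (frame and variable names are
# illustrative assumptions, not part of the original pipeline):
#   full_train = expand_timestep(train_df, test_data=False)
#   lags = [1, 2, 5, 96, 97, 101, 672, 673, 677]  # lags the trend features expect
#   full_train = generate_features(full_train, history=lags)
#   X_train, X_val, y_train, y_val = get_train_validate(full_train, features, split)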
def get_train_validate(full_df, features, split):
"""Generate training and validation sets with features."""
X = full_df[features + ['demand']]
print('[dtypes of features (including demand):]')
print(X.dtypes.value_counts())
print('\nSplit train and validation sets on day', split)
X_train = X[X['day'] <= split]
X_val = X[X['day'] > split]
y_train = X_train.pop('demand')
y_val = X_val.pop('demand')
days_train = len(X_train['day'].unique())
days_val = len(X_val['day'].unique())
print('')
print(days_train, 'days in train set.')
print('X_train:', X_train.shape)
print('y_train:', y_train.shape)
print('')
print(days_val, 'days in validation set.')
print('X_val:', X_val.shape)
print('y_val:', y_val.shape)
return X_train, X_val, y_train, y_val
def get_test_forecast(full_df, features):
"""Generate testing and forecasting sets with features."""
# TODO: same functionality, merge with get_train_validate
X = full_df[features + ['demand']]
print('[dtypes of features (including demand):]')
print(X.dtypes.value_counts())
# get the horizons for final forecasting
print('\nSplit test and forecast sets')
split = X['timestep'].max() - 6
X_test = X[X['timestep'] <= split]
X_forecast = X[X['timestep'] > split]
y_test = X_test.pop('demand')
y_forecast = X_forecast.pop('demand')
print('X_test:', X_test.shape)
print('y_test:', y_test.shape)
print('X_forecast:', X_forecast.shape)
print('y_forecast:', y_forecast.shape)
return X_test, X_forecast, y_test, y_forecast
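# Test-side counterpart of the sketch above, with `model`, `features` and `lags`
# standing in for whatever estimator, feature list and lag list are actually
# used (they are assumptions, not defined in this module):
#   full_test = expand_timestep(test_df, test_data=True)
#   full_test = generate_features(full_test, history=lags)
#   X_test, X_forecast, y_test, _ = get_test_forecast(full_test, features)
#   output = get_forecast_output(full_test, model.predict(X_forecast))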
def get_forecast_output(full_df, y_forecast, shift=False, path=None):
"""Generate the forecast output following the training data format.
Params
------
full_df (dataframe): as generated from `models.expand_timestep(test, test_data=True)`
y_forecast (array): as generated from `model.predict(X_forecast)`
shift (bool): if True, all forecast results will be shifted 1 timestep ahead,
i.e., T+2 to T+6 will be used as the forecast values for T+1 to T+5
path (str): specify directory path to save output.csv
Returns
-------
X_forecast (dataframe): the final output dataframe containing forecast values for
all TAZs from T+1 to T+5 following the final T in test data,
in the format of input data.
"""
X = full_df[['geohash6', 'day', 'timestep']]
# get the horizons for final forecasting
split = X['timestep'].max() - 6
X_forecast = X[X['timestep'] > split].sort_values(['geohash6', 'timestep'])
# formatting and convert timestep back to timestamp
X_forecast['timestamp'] = datasets.tstep_to_tstamp(X_forecast.pop('timestep'))
X_forecast['day'] = X_forecast['day'].astype(int)
# append forecast results
y_forecast = cap(y_forecast) # calibrate results beyond boundaries [0, 1]
X_forecast['demand'] = y_forecast
# drop additional T+6 horizon, after shifting if specified
shft = | pd.DataFrame.shift(X_forecast[['geohash6', 'demand']], periods=-1) | pandas.DataFrame.shift |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
    # Check the default index after appending two columns (Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
    mask[0] = 0b00101101
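    # 0b00101101 -> bits 0, 2, 3 and 5 are set, so rows 0, 2, 3 and 5 are
    # valid while rows 1 and 4 are null (matches validids below)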
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
    if dtype1 != dtype2 and ("datetime" in dtype1 or "datetime" in dtype2):
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # numerical columns are upcast to float in cudf.DataFrame.to_pandas(),
    # which casts nan to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash of a string name can vary between runs, which sometimes
    # makes enc_with_name_arr and enc_arr come out the same. There is no
    # better way to force a constant hash for a string, so an integer name
    # is used to get a deterministic value back from hash().
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguious():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
# Typecast Pandas because None will return `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
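    # Exercise set_index with a column label, lists of labels, and
    # Index/MultiIndex objects, across every drop/append/inplace combination.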
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
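    # pd.testing only accepts the check_freq kwarg from pandas 1.1 onwards,
    # hence the version gate below.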
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(pdf_new_name, gdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # ignore_index is not passed to pandas here (sort_index only gained it
    # in pandas 1.0); it is emulated below with reset_index(drop=True)
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
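    # shift introduces nulls/NaNs at the boundary; fillna(0) plus the astype
    # keeps the cudf and pandas results directly comparable.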
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
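    # `decimals` may be a scalar, a per-column Series/cudf.Series, or a dict.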
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
for c in gdf.columns:
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
    # Pandas treats `None` in object-dtype columns as True for some reason,
    # so replace it with `False` before comparing
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
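    # A DataFrame's __sizeof__ should equal the index's __sizeof__ plus the
    # sum of its columns' __sizeof__.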
rows = int(1e6)
index = list(i for i in range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
                reason="pandas's failure here seems like a bug (in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
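    # `err` flags the value types that are rejected when level=None.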
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
except ValueError as e:
            if str(e) == "Lengths must match." and not PANDAS_GE_110:
                # pandas < 1.1 raises here; see the linked pandas issue
                pytest.xfail(
                    "https://github.com/pandas-dev/pandas/issues/34256"
                )
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
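    # Pick sample data that fits the source dtype (no negatives for uint,
    # NaN only for floats).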
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
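    # var/std pass ddof=0 explicitly so cudf and pandas apply the same
    # normalization.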
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
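    # but cudf accepts a negative ``loc``; per the assertions below, ``-1``
    # appends the column at the end, like pandas ``insert(len(columns), ...)``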
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
# Special Case: Assume RangeIndex size == 0
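        # (i.e. the assertion below expects a zero-byte footprint for the
        # default RangeIndex)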
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
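    # Worked total (illustrative): 100 * 16 + 100 * 16 + 3 * 8 + 3 * 8
    # = 1600 + 1600 + 24 + 24 = 3248 bytes expected for this MultiIndex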
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas fills unmatched labels with NaN and typecasts to float64 when
    # values are missing after alignment, so typecast both results to float64
    # for the equality comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
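# Minimal pandas-only sketch of the alignment behavior noted in the test above
# (illustrative, not an original test; the helper name is made up): assigning
# a shorter Series fills the unmatched index labels with NaN, which upcasts
# the column to float64.
def _alignment_upcast_sketch():
    df = pd.DataFrame({"id": [0, 1, 2, 3, 4]})
    df["col"] = pd.Series([10, 20, 30])  # labels 3 and 4 get NaN
    return df["col"].dtype  # float64 because of the introduced NaNs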
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
        # Pandas erroneously drops the index name here, so restore it
        # before comparing
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False
if error is NotImplementedError
else True,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
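# Illustrative sketch (not an original test; the helper name is made up) of
# the ``where``/``mask`` relationship exercised above: ``mask`` nulls out
# values where the condition is True, i.e. it is the complement of ``where``.
def _where_mask_complement_sketch():
    s = cudf.Series([1, 2, 3, 4])
    cond = s > 2
    by_where = s.where(cond)  # nulls where cond is False -> <NA>, <NA>, 3, 4
    by_mask = s.mask(cond)    # nulls where cond is True  -> 1, 2, <NA>, <NA>
    return by_where, by_mask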
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
pdf = pd.DataFrame({"one_col": data})
if error == NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
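# Short sketch of the ``nan_as_null`` flag exercised above (illustrative; the
# helper name is made up): ``nan_as_null=True`` converts NaN values to cudf
# nulls, while ``nan_as_null=False`` keeps them as floating-point NaN.
def _nan_as_null_sketch():
    as_null = cudf.Series([1.0, np.nan], nan_as_null=True)
    as_nan = cudf.Series([1.0, np.nan], nan_as_null=False)
    return as_null.null_count, as_nan.null_count  # (1, 0)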
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
        # additional test for building a DataFrame from a GPU array whose
        # CUDA array interface has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = cudf.DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = cudf.DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = cudf.DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = cudf.DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
pdf = pd.DataFrame({"a": col_data})
gdf = cudf.DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = cudf.DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
def test_dataframe_info_basic():
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = cudf.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
np.array(data1), np.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = cudf.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
expected = cudf.Series([True] * 6, index=s1.index)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = cudf.Series(
[True, True, True, False, True, True], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = cudf.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
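# Note derived from the assertions above: cudf.isclose aligns the inputs on
# their indices first, and labels missing from either side compare as False.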
def test_dataframe_to_dict_error():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = cudf.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]),
pd.Series(["abc", "def", "ghi", "xyz", "pqr", "abc"]),
pd.Series(
[1, 2, 3, 4, 5, 10],
index=["abc", "def", "ghi", "xyz", "pqr", "abc"],
),
pd.Series(
["abc", "def", "ghi", "xyz", "pqr", "abc"],
index=[1, 2, 3, 4, 5, 10],
),
pd.Series(index=["a", "b", "c", "d", "e", "f"], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
pd.Series(dtype="float64"),
pd.Series([], dtype="float64"),
],
)
def test_series_keys(ps):
gds = cudf.from_pandas(ps)
if len(ps) == 0 and not isinstance(ps.index, pd.RangeIndex):
assert_eq(ps.keys().astype("float64"), gds.keys())
else:
assert_eq(ps.keys(), gds.keys())
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
pd.DataFrame(),
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
pd.DataFrame([]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([], index=[100]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = cudf.from_pandas(other)
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({12: [], 22: []}),
pd.DataFrame([[1, 2], [3, 4]], columns=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[0, 1], index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[1, 0], index=[7, 8]),
pd.DataFrame(
{
23: [315.3324, 3243.32432, 3232.332, -100.32],
33: [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
0: [315.3324, 3243.32432, 3232.332, -100.32],
1: [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.Series([10, 11, 23, 234, 13]),
pytest.param(
pd.Series([10, 11, 23, 234, 13], index=[11, 12, 13, 44, 33]),
marks=pytest.mark.xfail(
reason="pandas bug: "
"https://github.com/pandas-dev/pandas/issues/35092"
),
),
{1: 1},
{0: 10, 1: 100, 2: 102},
],
)
@pytest.mark.parametrize("sort", [False, True])
def test_dataframe_append_series_dict(df, other, sort):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
if isinstance(other, pd.Series):
other_gd = cudf.from_pandas(other)
else:
other_gd = other
expected = pdf.append(other_pd, ignore_index=True, sort=sort)
actual = gdf.append(other_gd, ignore_index=True, sort=sort)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_series_mixed_index():
df = cudf.DataFrame({"first": [], "d": []})
sr = cudf.Series([1, 2, 3, 4])
with pytest.raises(
TypeError,
match=re.escape(
"cudf does not support mixed types, please type-cast "
"the column index of dataframe and index of series "
"to same dtypes."
),
):
df.append(sr, ignore_index=True)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
[pd.DataFrame([[5, 6], [7, 8]], columns=list("AB"))],
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
],
[pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()],
[
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
],
[
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
],
[pd.DataFrame([]), pd.DataFrame([], index=[100])],
[
pd.DataFrame([]),
pd.DataFrame([], index=[100]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = [
cudf.from_pandas(o) if isinstance(o, pd.DataFrame) else o
for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
)
@pytest.mark.parametrize(
"other",
[
[[1, 2], [10, 100]],
[[1, 2, 10, 100, 0.1, 0.2, 0.0021]],
[[]],
[[], [], [], []],
[[0.23, 0.00023, -10.00, 100, 200, 1000232, 1232.32323]],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = [
cudf.from_pandas(o) if isinstance(o, pd.DataFrame) else o
for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_error():
df = cudf.DataFrame({"a": [1, 2, 3]})
ps = cudf.Series([1, 2, 3])
with pytest.raises(
TypeError,
match="Can only append a Series if ignore_index=True "
"or if the Series has a name",
):
df.append(ps)
def test_cudf_arrow_array_error():
df = cudf.DataFrame({"a": [1, 2, 3]})
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Table via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Table, consider "
"using .to_arrow()",
):
df.__arrow_array__()
sr = cudf.Series([1, 2, 3])
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Array via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Array, consider "
"using .to_arrow()",
):
sr.__arrow_array__()
sr = cudf.Series(["a", "b", "c"])
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Array via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Array, consider "
"using .to_arrow()",
):
sr.__arrow_array__()
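# Illustrative counterpart to the errors tested above (a sketch, not an
# original test; the helper name is made up): the supported, explicit
# conversion path is ``.to_arrow()``, which returns a host pyarrow
# Table/Array.
def _explicit_to_arrow_sketch():
    tbl = cudf.DataFrame({"a": [1, 2, 3]}).to_arrow()  # pyarrow.Table
    arr = cudf.Series([1, 2, 3]).to_arrow()  # pyarrow.Array
    return tbl, arr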
@pytest.mark.parametrize("n", [0, 2, 5, 10, None])
@pytest.mark.parametrize("frac", [0.1, 0.5, 1, 2, None])
@pytest.mark.parametrize("replace", [True, False])
@pytest.mark.parametrize("axis", [0, 1])
def test_dataframe_sample_basic(n, frac, replace, axis):
    # as we currently don't support columns with the same name
if axis == 1 and replace:
return
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5],
"float": [0.05, 0.2, 0.3, 0.2, 0.25],
"int": [1, 3, 5, 4, 2],
},
index=[1, 2, 3, 4, 5],
)
df = cudf.DataFrame.from_pandas(pdf)
random_state = 0
try:
pout = pdf.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
except BaseException:
assert_exceptions_equal(
lfunc=pdf.sample,
rfunc=df.sample,
lfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
"axis": axis,
},
),
rfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
"axis": axis,
},
),
)
else:
gout = df.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
assert pout.shape == gout.shape
@pytest.mark.parametrize("replace", [True, False])
@pytest.mark.parametrize("random_state", [1, np.random.mtrand.RandomState(10)])
def test_dataframe_reproducibility(replace, random_state):
df = cudf.DataFrame({"a": cupy.arange(0, 1024)})
expected = df.sample(1024, replace=replace, random_state=random_state)
out = df.sample(1024, replace=replace, random_state=random_state)
assert_eq(expected, out)
@pytest.mark.parametrize("n", [0, 2, 5, 10, None])
@pytest.mark.parametrize("frac", [0.1, 0.5, 1, 2, None])
@pytest.mark.parametrize("replace", [True, False])
def test_series_sample_basic(n, frac, replace):
psr = pd.Series([1, 2, 3, 4, 5])
sr = cudf.Series.from_pandas(psr)
random_state = 0
try:
pout = psr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
except BaseException:
assert_exceptions_equal(
lfunc=psr.sample,
rfunc=sr.sample,
lfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
},
),
rfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
},
),
)
else:
gout = sr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
assert pout.shape == gout.shape
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[100, 10, 1, 0]),
pd.DataFrame(columns=["a", "b", "c", "d"]),
pd.DataFrame(columns=["a", "b", "c", "d"], index=[100]),
pd.DataFrame(
columns=["a", "b", "c", "d"], index=[100, 10000, 2131, 133]
),
pd.DataFrame({"a": [1, 2, 3], "b": ["abc", "xyz", "klm"]}),
],
)
def test_dataframe_empty(df):
pdf = df
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.empty, gdf.empty)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[100, 10, 1, 0]),
pd.DataFrame(columns=["a", "b", "c", "d"]),
pd.DataFrame(columns=["a", "b", "c", "d"], index=[100]),
pd.DataFrame(
columns=["a", "b", "c", "d"], index=[100, 10000, 2131, 133]
),
pd.DataFrame({"a": [1, 2, 3], "b": ["abc", "xyz", "klm"]}),
],
)
def test_dataframe_size(df):
pdf = df
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.size, gdf.size)
@pytest.mark.parametrize(
"ps",
[
pd.Series(dtype="float64"),
pd.Series(index=[100, 10, 1, 0], dtype="float64"),
pd.Series([], dtype="float64"),
pd.Series(["a", "b", "c", "d"]),
pd.Series(["a", "b", "c", "d"], index=[0, 1, 10, 11]),
],
)
def test_series_empty(ps):
ps = ps
gs = cudf.from_pandas(ps)
assert_eq(ps.empty, gs.empty)
@pytest.mark.parametrize(
"data",
[
[],
[1],
{"a": [10, 11, 12]},
{
"a": [10, 11, 12],
"another column name": [12, 22, 34],
"xyz": [0, 10, 11],
},
],
)
@pytest.mark.parametrize("columns", [["a"], ["another column name"], None])
def test_dataframe_init_with_columns(data, columns):
pdf = pd.DataFrame(data, columns=columns)
gdf = cudf.DataFrame(data, columns=columns)
assert_eq(
pdf,
gdf,
check_index_type=False if len(pdf.index) == 0 else True,
check_dtype=False if pdf.empty and len(pdf.columns) else True,
)
@pytest.mark.parametrize(
"data, ignore_dtype",
[
([pd.Series([1, 2, 3])], False),
([pd.Series(index=[1, 2, 3], dtype="float64")], False),
([pd.Series(name="empty series name", dtype="float64")], False),
(
[pd.Series([1]), pd.Series([], dtype="float64"), pd.Series([3])],
False,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series([3], name="series that is named"),
],
False,
),
([pd.Series([1, 2, 3], name="hi")] * 10, False),
([pd.Series([1, 2, 3], name=None, index=[10, 11, 12])] * 10, False),
(
[
pd.Series([1, 2, 3], name=None, index=[10, 11, 12]),
pd.Series([1, 2, 30], name=None, index=[13, 144, 15]),
],
True,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
],
False,
),
(
[
| pd.Series([1, 0.324234, 32424.323, -1233, 34242]) | pandas.Series |
""" test the scalar Timedelta """
from datetime import timedelta
import numpy as np
import pytest
from pandas._libs import lib
from pandas._libs.tslibs import (
NaT,
iNaT,
)
import pandas as pd
from pandas import (
Timedelta,
TimedeltaIndex,
offsets,
to_timedelta,
)
import pandas._testing as tm
class TestTimedeltaUnaryOps:
def test_unary_ops(self):
td = Timedelta(10, unit="d")
# __neg__, __pos__
assert -td == Timedelta(-10, unit="d")
assert -td == Timedelta("-10d")
assert +td == Timedelta(10, unit="d")
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta("10d")
class TestTimedeltas:
@pytest.mark.parametrize(
"unit, value, expected",
[
("us", 9.999, 9999),
("ms", 9.999999, 9999999),
("s", 9.999999999, 9999999999),
],
)
def test_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
assert result.value == expected
result = Timedelta(str(value) + unit)
assert result.value == expected
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta("1 days, 10:11:12.100123456")
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
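        # Worked value of ``expt`` (illustrative): 86400 + 36000 + 660 + 12
        # + 0.100123456 = 123072.100123456 seconds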
rng = Timedelta(np.nan)
assert np.isnan(rng.total_seconds())
def test_conversion(self):
for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert isinstance(pydt, timedelta) and not isinstance(pydt, Timedelta)
assert td == np.timedelta64(td.value, "ns")
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, "ns")
assert td == td64
assert isinstance(td64, np.timedelta64)
# this is NOT equal and cannot be roundtripped (because of the nanos)
td = Timedelta("1 days, 10:11:12.012345678")
assert td != td.to_pytimedelta()
def test_fields(self):
def check(value):
# that we are int
assert isinstance(value, int)
# compat to datetime.timedelta
rng = to_timedelta("1 days, 10:11:12")
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta("-1 days, 10:11:12")
assert abs(td) == Timedelta("13:48:48")
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta("0 days 13:48:48")
assert -Timedelta("-1 days, 10:11:12").value == 49728000000000
assert Timedelta("-1 days, 10:11:12").value == -49728000000000
rng = to_timedelta("-1 days, 10:11:12.100123456")
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# components
tup = to_timedelta(-1, "us").components
assert tup.days == -1
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta("-1 days 1 us").components
assert tup.days == -2
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_iso_conversion(self):
# GH #21877
expected = Timedelta(1, unit="s")
assert to_timedelta("P0DT0H0M1S") == expected
def test_nat_converters(self):
result = to_timedelta("nat").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
result = to_timedelta("nan").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
@pytest.mark.parametrize(
"unit, np_unit",
[(value, "W") for value in ["W", "w"]]
+ [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
+ [
(value, "m")
for value in [
"m",
"minute",
"min",
"minutes",
"t",
"Minute",
"Min",
"Minutes",
"T",
]
]
+ [
(value, "s")
for value in [
"s",
"seconds",
"sec",
"second",
"S",
"Seconds",
"Sec",
"Second",
]
]
+ [
(value, "ms")
for value in [
"ms",
"milliseconds",
"millisecond",
"milli",
"millis",
"l",
"MS",
"Milliseconds",
"Millisecond",
"Milli",
"Millis",
"L",
]
]
+ [
(value, "us")
for value in [
"us",
"microseconds",
"microsecond",
"micro",
"micros",
"u",
"US",
"Microseconds",
"Microsecond",
"Micro",
"Micros",
"U",
]
]
+ [
(value, "ns")
for value in [
"ns",
"nanoseconds",
"nanosecond",
"nano",
"nanos",
"n",
"NS",
"Nanoseconds",
"Nanosecond",
"Nano",
"Nanos",
"N",
]
],
)
@pytest.mark.parametrize("wrapper", [np.array, list, pd.Index])
def test_unit_parser(self, unit, np_unit, wrapper):
# validate all units, GH 6855, GH 21762
# array-likes
expected = TimedeltaIndex(
[np.timedelta64(i, np_unit) for i in np.arange(5).tolist()]
)
result = to_timedelta(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
str_repr = [f"{x}{unit}" for x in np.arange(5)]
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
# scalar
expected = Timedelta(np.timedelta64(2, np_unit).astype("timedelta64[ns]"))
result = to_timedelta(2, unit=unit)
assert result == expected
result = Timedelta(2, unit=unit)
assert result == expected
result = to_timedelta(f"2{unit}")
assert result == expected
result = Timedelta(f"2{unit}")
assert result == expected
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
msg = "Units 'M', 'Y', and 'y' are no longer supported"
with pytest.raises(ValueError, match=msg):
Timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta([1, 2], unit)
def test_numeric_conversions(self):
assert Timedelta(0) == np.timedelta64(0, "ns")
assert Timedelta(10) == np.timedelta64(10, "ns")
assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns")
assert Timedelta(10, unit="us") == np.timedelta64(10, "us")
assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms")
assert Timedelta(10, unit="s") == np.timedelta64(10, "s")
assert Timedelta(10, unit="d") == np.timedelta64(10, "D")
def test_timedelta_conversions(self):
assert Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").astype(
"m8[ns]"
)
assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").astype(
"m8[ns]"
)
assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").astype("m8[ns]")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
td = Timedelta("10m7s")
assert td.to_timedelta64() == td.to_numpy()
@pytest.mark.parametrize(
"freq,s1,s2",
[
# This first case has s1, s2 being the same as t1,t2 below
(
"N",
Timedelta("1 days 02:34:56.789123456"),
Timedelta("-1 days 02:34:56.789123456"),
),
(
"U",
Timedelta("1 days 02:34:56.789123000"),
Timedelta("-1 days 02:34:56.789123000"),
),
(
"L",
Timedelta("1 days 02:34:56.789000000"),
Timedelta("-1 days 02:34:56.789000000"),
),
("S", Timedelta("1 days 02:34:57"), | Timedelta("-1 days 02:34:57") | pandas.Timedelta |
import re
import queue
import sys
import string
import os
import bs4
from bs4 import Comment
import requests
import urllib.parse
import pandas as pd
import numpy as np
import sqlite3
import sqlalchemy
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import war_calc as war
###############################################################################
# GENERAL PURPOSE WEB SCRAPING FUNCTIONS #
###############################################################################
def get_request(url):
'''
    Open a connection to the specified URL and return the response if successful
Inputs:
url (str): An absolute url
Returns:
        Response object, or None if the request failed
'''
if url == '':
r = None
elif urllib.parse.urlparse(url).netloc != '':
try:
r = requests.get(url)
if r.status_code == 404 or r.status_code == 403:
r = None
except Exception:
r = None
else:
r = None
return r
def read_request(request):
'''
    Return the text of a response object, encoded as utf-8 bytes.
    Returns '' if the read fails
'''
try:
return request.text.encode('utf8')
except Exception:
return ''
def get_request_url(request):
'''
    Extract the true url from a request
'''
return request.url
def is_absolute_url(url):
'''
Is the url string an absolute url?
'''
if url == '':
return False
return urllib.parse.urlparse(url).netloc != ''
def remove_fragment(url):
'''
Remove the fragment from a url
'''
(url, frag) = urllib.parse.urldefrag(url)
return url
def convert_if_relative_url(current_url, new_url):
'''
    Attempt to determine whether new_url is a relative url.
If so, use current_url to determine the path and create a new absolute url.
Will add the protocol, if that is all that is missing.
Inputs:
current_url (str): absolute url
new_url (str): the url we are converting
Returns:
new_absolute_url if the new_url can be converted
None if it cannot determine that new_url is a relative_url
'''
if new_url == '' or not is_absolute_url(current_url):
return None
if is_absolute_url(new_url):
return new_url
parsed_url = urllib.parse.urlparse(new_url)
path_parts = parsed_url.path.split('/')
if len(path_parts) == 0:
return None
ext = path_parts[0][-4:]
if ext in ['.edu', '.org', '.com', '.net']:
return 'http://' + new_url
elif new_url[:3] == 'www':
        return 'http://' + new_url
else:
return urllib.parse.urljoin(current_url, new_url)
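# Quick illustrative checks for convert_if_relative_url (the URLs below are
# hypothetical examples, not pages the crawler necessarily visits):
def _demo_convert_if_relative_url():
    base = 'https://fbref.com/en/comps/9/stats/Premier-League-Stats'
    # absolute urls pass through unchanged
    assert (convert_if_relative_url(base, 'https://fbref.com/en/comps/9/shooting/')
            == 'https://fbref.com/en/comps/9/shooting/')
    # relative paths are resolved against the current url
    assert (convert_if_relative_url(base, 'keepers/Premier-League-Keepers')
            == 'https://fbref.com/en/comps/9/stats/keepers/Premier-League-Keepers')
    # bare domain links get a protocol added
    assert convert_if_relative_url(base, 'www.example.com/page') == 'http://www.example.com/page'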
def get_soup(url):
'''
Takes a url string and returns a bs4 object
Inputs:
url (str): a url
Returns:
A BeautifulSoup object
'''
request = get_request(url)
    if request is not None:
text = read_request(request)
return bs4.BeautifulSoup(text, 'html.parser')
def queue_links(soup, starting_url, link_q, sub='main'):
'''
Given a bs4 object, pull out all the links that need to be crawled
Inputs:
        soup (bs4): a bs4 object that all link tags (a) can be pulled from
starting_url (str): the initial url that created the soup object
link_q (Queue): the current queue of links to crawl
sub (str): the subcrawl
Returns:
Updated link_q with all link tags that need to be crawled
'''
links = soup.find_all('a', href = True)
for link in links:
href = link.get('href')
no_frag = remove_fragment(href)
clean_link = convert_if_relative_url(starting_url, no_frag)
if is_absolute_url(clean_link):
if okay_url_fbref(clean_link, sub):
if clean_link != starting_url:
if clean_link not in link_q.queue:
link_q.put(clean_link)
return link_q
def to_sql(df, name, db):
'''
Converts a pandas DataFrame to an SQLite table and adds it to a database.
Inputs:
df (DataFrame): a pandas DataFrame created by a get_tables function
        name (str): the name of the SQL table we're creating
db (database): a SQL database
Returns:
None
'''
connection = sqlite3.connect(db)
cursor = connection.cursor()
df.to_sql(name, connection, if_exists = "replace")
print('Wrote ', name, 'to', str(db))
cursor.close()
connection.close()
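# Minimal usage sketch for to_sql; the DataFrame, table name and 'demo.db'
# file below are made up for illustration.
def _demo_to_sql():
    demo_df = pd.DataFrame({'player': ['A', 'B'], 'goals': [3, 5]})
    to_sql(demo_df, 'demo_table', 'demo.db')
    with sqlite3.connect('demo.db') as conn:
        return pd.read_sql('SELECT * FROM demo_table', conn)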
###############################################################################
# SPECIFIC CRAWLERS FOR SITES #
###############################################################################
#################################wikipedia.com#################################
def get_wiki_table():
'''
Scrapes https://en.wikipedia.org/wiki/Premier_League_records_and_statistics
for the all-time goal and wins statistics for every team to determine
on average, how many more goals a team scores than their opponents per win
Inputs:
None
Returns:
        goals (DataFrame): per-club all-time goal and win statistics, with
            per-season averages (wins, GF, GA, GD) added
'''
url = 'https://en.wikipedia.org/wiki/Premier_League_records_and_statistics#Goals_2'
soup = get_soup(url)
tables = soup.find_all('table', class_ = "wikitable sortable")
goal_table = tables[4]
columns = goal_table.find_all('th')
columns = [tag.text.strip('.\n') for tag in columns]
table_rows = goal_table.find_all('tr')
data = []
for tr in table_rows:
td = tr.find_all('td')
        row = [cell.text for cell in td]
data.append(row)
goals = pd.DataFrame(data, columns = columns)
goals = goals.dropna()
labels_to_drop = ['Pos', 'Pld', 'Pts', '1st', '2nd', '3rd', '4th',
'Relegated', 'BestPos']
goals.drop(columns=labels_to_drop, inplace=True)
goals["GF"] = goals["GF"].str.replace(",","")
goals["GA"] = goals["GA"].str.replace(",","")
goals["GD"] = goals["GD"].str.replace(",","")
goals["GD"] = goals["GD"].str.replace("−","-")
goals = goals.apply(pd.to_numeric, errors='ignore')
goals['Club']=goals['Club'].str.strip('[b]\n')
goals['Games'] = goals['Win']+goals['Draw']+goals['Loss']
goals['Wins_per_season'] = goals['Win'] / goals['Seasons']
goals['GF_per_season'] = goals['GF'] / goals['Seasons']
goals['GA_per_season'] = goals['GA'] / goals['Seasons']
goals['GD_per_season'] = goals['GD'] / goals['Seasons']
return goals
#################################fbref.com#####################################
def okay_url_fbref(url, sub='main'):
'''
Checks if a url is within the limiting_domain of the fbref crawler
Inputs:
url (str): an absolute url
sub (str): which subcrawl are we okaying
Returns:
True if the protocol for the url is http(s), the domain is in the
limiting_domain, and the path is either a directory or a file that
has no extension or ends in .html.
False otherwise or if the url includes a '@'
'''
limiting_domain = 'fbref.com/en/comps/9/'
parsed_url = urllib.parse.urlparse(url)
loc = parsed_url.netloc
ld = len(limiting_domain)
trunc_loc = loc[-(ld+1):]
adv_years = ['2018-2019', '2017-2018']
    if url is None:
return False
if 'mailto:' in url or '@' in url:
return False
if parsed_url.scheme != 'http' and parsed_url.scheme != 'https':
return False
if loc == '':
return False
if parsed_url.fragment != '':
return False
if parsed_url.query != '':
return False
if not (limiting_domain in loc+parsed_url.path):
return False
if sub == 'main':
if not '/stats/' in parsed_url.path:
return False
if sub == 'keep_adv':
if not '/keepersadv' in parsed_url.path:
return False
good_year = False
for year in adv_years:
if year in parsed_url.path:
good_year = True
break
        if not good_year:
return False
if sub == 'keep_basic':
if not '/keepers/' in parsed_url.path:
return False
if sub == 'shooting':
if not '/shooting/' in parsed_url.path:
return False
if sub == 'passing':
if not '/passing/' in parsed_url.path:
return False
good_year = False
for year in adv_years:
if year in parsed_url.path:
good_year = True
break
        if not good_year:
return False
(filename, ext) = os.path.splitext(parsed_url.path)
return (ext == '' or ext == '.html')
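# Illustrative sanity checks for okay_url_fbref (hypothetical URLs):
def _demo_okay_url_fbref():
    assert okay_url_fbref('https://fbref.com/en/comps/9/stats/Premier-League-Stats', 'main')
    # non-stats pages are rejected by the 'main' subcrawl
    assert not okay_url_fbref('https://fbref.com/en/comps/9/fixtures/Premier-League-Fixtures', 'main')
    # anything outside the limiting domain is rejected
    assert not okay_url_fbref('https://example.com/en/comps/9/stats/', 'main')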
def get_tables_fbref(soup, db='players.db'):
'''
Takes a https://fbref.com/en/comps/9/####/stats/ page and updates the
players.db sqlite3 database using the tables from the page.
Inputs:
soup (bs4): BeautifulSoup for a fbref.com yearly stats page
db (database): sqlite3 database
Returns:
None
'''
tables = soup.find_all('div', class_ = "table_outer_container")
# get players data in commented out table
players = soup.find_all(text = lambda text: isinstance(text, Comment))
# commented fbref table is the 11th in the list of comments
plays = bs4.BeautifulSoup(players[11], 'html.parser').find('tbody')
table_rows = plays.find_all('tr')
col_tags = bs4.BeautifulSoup(players[11], 'html.parser').find_all('th', scope = 'col')
columns = []
for col in col_tags:
columns.append(col.get_text())
columns = columns[1:]
# get year that data is from on FBref
year = soup.find('li', class_='full').get_text()[:9]
# rename columns
columns[15] = 'Gls_per_game'
columns[16] = 'Ast_per_game'
    if len(columns) > 27:  # xG columns are only present for advanced-stats seasons
columns[23] = 'xG_per_game'
columns[24] = 'xA_per_game'
columns[25] = 'xG+xA_per_game'
columns[26] = 'npxG_per_game'
columns[27] = 'npxG+xA_per_game'
# construct the player_data DataFrame
data = []
for tr in table_rows:
td = tr.find_all('td')
        row = [cell.text for cell in td]
data.append(row)
player_data = pd.DataFrame(data, columns = columns)
player_data = player_data.dropna()
    # drop matches column because it is just a link to matches played
if 'Matches' in player_data.columns:
player_data = player_data.drop(columns = 'Matches')
# clean and parse position column
player_data = player_data.rename(columns={'Pos': 'Pos_1'})
player_data.insert(3, 'Pos_2', None)
player_data[['Pos_1', 'Pos_2']] = player_data.Pos_1.str.split(',', expand=True)
# clean nation column
player_data['Nation'] = player_data['Nation'].str.strip().str[-3:]
# write the main year table to the database
to_sql(player_data, year, db)
# generate 3 additional tables for each year that contain players
# from each of the major positions
positions = ['DF', 'FW', 'MF']
for pos in positions:
pos_1 = player_data.loc[(player_data['Pos_1']==pos) \
& (player_data['Pos_2'].isnull())]
title = year + '-' + pos
to_sql(pos_1, title, db)
    # generate the wingback table and write to the database
df_mf = player_data[(player_data['Pos_1'] == 'DF') \
& (player_data['Pos_2'] == 'MF')]
mf_df = player_data[(player_data['Pos_1'] == 'MF') \
& (player_data['Pos_2'] == 'DF')]
wb = | pd.concat([df_mf, mf_df]) | pandas.concat |
import pandas as pd
import numpy as np
import datetime
import sys
import time
import xgboost as xgb
from add_feture import *
FEATURE_EXTRACTION_SLOT = 10
LabelDay = datetime.datetime(2014,12,18,0,0,0)
Data = pd.read_csv("../../../../data/fresh_comp_offline/drop1112_sub_item.csv")
Data['daystime'] = Data['days'].map(lambda x: time.strptime(x, "%Y-%m-%d")).map(lambda x: datetime.datetime(*x[:6]))
def get_train(train_user,end_time):
    # Take the records from the day before the label day as the candidate samples to label
data_train = train_user[(train_user['daystime'] == (end_time-datetime.timedelta(days=1)))]#&((train_user.behavior_type==3)|(train_user.behavior_type==2))
    # Drop duplicate (user, item) samples from the training candidates
    data_train = data_train.drop_duplicates(['user_id', 'item_id'])
    # The user_id/item_id ratio serves as a cheap (user, item) pair key
    data_train_ui = data_train['user_id'] / data_train['item_id']
# print(len(data_train))
    # Label using the actual purchases on the label day
data_label = train_user[train_user['daystime'] == end_time]
data_label_buy = data_label[data_label['behavior_type'] == 4]
data_label_buy_ui = data_label_buy['user_id'] / data_label_buy['item_id']
    # Label the previous day's interaction records
data_train_labeled = data_train_ui.isin(data_label_buy_ui)
    label_map = {True: 1, False: 0}
    data_train_labeled = data_train_labeled.map(label_map)
data_train['label'] = data_train_labeled
return data_train[['user_id', 'item_id','item_category', 'label']]
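# Toy illustration of the pair-key labeling used above (made-up ids): the
# user_id/item_id ratio acts as a cheap (user, item) pair key, and isin()
# marks which candidate pairs were actually bought on the label day.
def _demo_pair_labeling():
    candidates = pd.DataFrame({'user_id': [1, 1, 2], 'item_id': [10, 20, 10]})
    bought = pd.DataFrame({'user_id': [1], 'item_id': [20]})
    cand_key = candidates['user_id'] / candidates['item_id']
    buy_key = bought['user_id'] / bought['item_id']
    return cand_key.isin(buy_key).astype(int)  # -> [0, 1, 0]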
def get_label_testset(train_user,LabelDay):
    # The test set is all of the previous day's interaction data
data_test = train_user[(train_user['daystime'] == LabelDay)]#&((train_user.behavior_type==3)|(train_user.behavior_type==2))
data_test = data_test.drop_duplicates(['user_id', 'item_id'])
return data_test[['user_id', 'item_id','item_category']]
def item_category_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
item_count = pd.crosstab(data.item_category,data.behavior_type)
item_count_before5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
item_count_before5 = pd.crosstab(beforefiveday.item_category,beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5)]
item_count_before5 = pd.crosstab(beforefiveday.item_category,beforefiveday.behavior_type)
item_count_before_3=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3+2)]
item_count_before_3 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3)]
item_count_before_3 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
item_count_before_2=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7+2)]
item_count_before_2 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7)]
item_count_before_2 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
beforeonedayitem_count = pd.crosstab(beforeoneday.item_category,beforeoneday.behavior_type)
countAverage = item_count/FEATURE_EXTRACTION_SLOT
buyRate = pd.DataFrame()
buyRate['click'] = item_count[1]/item_count[4]
buyRate['skim'] = item_count[2]/item_count[4]
buyRate['collect'] = item_count[3]/item_count[4]
buyRate.index = item_count.index
buyRate_2 = pd.DataFrame()
buyRate_2['click'] = item_count_before5[1]/item_count_before5[4]
buyRate_2['skim'] = item_count_before5[2]/item_count_before5[4]
buyRate_2['collect'] = item_count_before5[3]/item_count_before5[4]
buyRate_2.index = item_count_before5.index
buyRate_3 = pd.DataFrame()
buyRate_3['click'] = item_count_before_3[1]/item_count_before_3[4]
buyRate_3['skim'] = item_count_before_3[2]/item_count_before_3[4]
buyRate_3['collect'] = item_count_before_3[3]/item_count_before_3[4]
buyRate_3.index = item_count_before_3.index
buyRate = buyRate.replace([np.inf, -np.inf], 0)
buyRate_2 = buyRate_2.replace([np.inf, -np.inf], 0)
buyRate_3 = buyRate_3.replace([np.inf, -np.inf], 0)
item_category_feture = pd.merge(item_count,beforeonedayitem_count,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,countAverage,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,buyRate,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,item_count_before5,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,item_count_before_3,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,item_count_before_2,how='left',right_index=True,left_index=True)
# item_category_feture = pd.merge(item_category_feture,buyRate_2,how='left',right_index=True,left_index=True)
# item_category_feture = pd.merge(item_category_feture,buyRate_3,how='left',right_index=True,left_index=True)
item_category_feture.fillna(0,inplace=True)
return item_category_feture
def item_id_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
item_count = pd.crosstab(data.item_id,data.behavior_type)
item_count_before5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
item_count_before5 = pd.crosstab(beforefiveday.item_id,beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5)]
item_count_before5 = pd.crosstab(beforefiveday.item_id,beforefiveday.behavior_type)
item_count_before_3=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3+2)]
item_count_before_3 = pd.crosstab(beforethreeday.item_id,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3)]
item_count_before_3 = | pd.crosstab(beforethreeday.item_id,beforethreeday.behavior_type) | pandas.crosstab |
import logging
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder, StandardScaler
from interec.activeness.integrator_activeness import calculate_integrator_activeness
from interec.entities.integrator import Integrator
from interec.entities.pull_request import PullRequest
from interec.string_compare.file_path_similarity import longest_common_prefix, longest_common_suffix, \
longest_common_sub_string, longest_common_sub_sequence
from interec.text_similarity.text_similarity import cos_similarity
from pyspark.sql import SparkSession
database = 'rails'
spark = ""
all_prs_df = ""
all_integrators_df = ""
all_integrators = ""
def initialise_app(database_name):
global database, spark, all_prs_df, all_integrators_df, all_integrators
database = database_name
# Create a spark session
spark = SparkSession \
.builder \
.master('local') \
.appName("Interec") \
.getOrCreate()
# Read table pull_request
all_prs_df = spark.read \
.format("jdbc") \
.option("url", "jdbc:mysql://localhost:3306/" + database) \
.option("driver", 'com.mysql.cj.jdbc.Driver') \
.option("dbtable", "pull_request") \
.option("user", "root") \
.option("password", "") \
.load()
# Read table integrator
all_integrators_df = spark.read \
.format("jdbc") \
.option("url", "jdbc:mysql://localhost:3306/" + database) \
.option("driver", 'com.mysql.cj.jdbc.Driver') \
.option("dbtable", "integrator") \
.option("user", "root") \
.option("password", "") \
.load()
all_prs_df.createOrReplaceTempView("pull_request")
all_integrators_df.createOrReplaceTempView("integrator")
# Get all the integrators for the project
query = "SELECT * FROM integrator"
all_integrators = spark.sql(query).collect()
def calculate_scores(offset, limit):
df = pd.DataFrame()
logging.basicConfig(level=logging.INFO, filename='app.log', format='%(name)s - %(levelname)s - %(message)s')
query1 = "SELECT pr_id, pull_number, requester_login, title, description, created_date, merged_date, " \
"integrator_login, files " \
"FROM pull_request " \
"WHERE pr_id > '%s' and pr_id <= '%s' " \
"ORDER BY pr_id " \
"LIMIT %d" % (offset, offset + limit, limit)
all_prs = spark.sql(query1)
for test_pr in all_prs.collect():
test_pr = PullRequest(test_pr)
print(test_pr.pr_id)
logging.info(test_pr.pr_id)
pr_integrator = Integrator(test_pr.integrator_login)
# Calculate scores for integrator
# Read all the PRs integrator reviewed before
query1 = "SELECT pr_id, pull_number, requester_login, title, description, created_date, merged_date, " \
"integrator_login, files " \
"FROM pull_request " \
"WHERE merged_date < timestamp('%s') AND integrator_login = '%s'" % \
(test_pr.created_date, pr_integrator.integrator_login)
integrator_reviewed_prs = spark.sql(query1).collect()
for integrator_reviewed_pr in integrator_reviewed_prs:
old_pr = PullRequest(integrator_reviewed_pr)
old_pr_file_paths = old_pr.files
# Calculate file path similarity
for new_pr_file_path in test_pr.files:
for file_path in old_pr_file_paths:
number_of_file_combinations = len(old_pr_file_paths) * len(test_pr.files)
max_file_path_length = max(len(new_pr_file_path.split("/")), len(file_path.split("/")))
divider = max_file_path_length * number_of_file_combinations
pr_integrator.longest_common_prefix_score += \
(longest_common_prefix(new_pr_file_path, file_path) / divider)
pr_integrator.longest_common_suffix_score += \
(longest_common_suffix(new_pr_file_path, file_path) / divider)
pr_integrator.longest_common_sub_string_score += \
(longest_common_sub_string(new_pr_file_path, file_path) / divider)
pr_integrator.longest_common_sub_sequence_score += \
(longest_common_sub_sequence(new_pr_file_path, file_path) / divider)
# Calculate cosine similarity of title
pr_integrator.pr_title_similarity += cos_similarity(test_pr.title, old_pr.title)
# Calculate cosine similarity of description
if test_pr.description != "" and old_pr.description != "":
pr_integrator.pr_description_similarity += cos_similarity(test_pr.description, old_pr.description)
# Calculate activeness of the integrator
pr_integrator.activeness += calculate_integrator_activeness(test_pr, old_pr)
row = {'pr_id': test_pr.pr_id,
'integrator': pr_integrator.integrator_login,
'lcp': pr_integrator.longest_common_prefix_score,
'lcs': pr_integrator.longest_common_suffix_score,
'lc_substr': pr_integrator.longest_common_sub_string_score,
'ls_subseq': pr_integrator.longest_common_sub_sequence_score,
'cos_title': pr_integrator.pr_title_similarity,
'cos_description': pr_integrator.pr_description_similarity,
'activeness': pr_integrator.activeness,
'text_similarity': pr_integrator.pr_title_similarity + pr_integrator.pr_description_similarity,
'file_similarity': (pr_integrator.longest_common_prefix_score
+ pr_integrator.longest_common_suffix_score
+ pr_integrator.longest_common_sub_string_score
+ pr_integrator.longest_common_sub_sequence_score)
}
df = df.append(row, ignore_index=True)
csv_file_name = database + "_test_pr_stats.csv"
df.to_csv(csv_file_name, index=False)
def standardize_score(score, min_val, max_val):
new_value = ((score - min_val)*100)/(max_val - min_val)
return new_value
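# Example: standardize_score maps a value onto a 0-100 scale relative to the
# observed min/max (illustrative numbers only).
def _demo_standardize_score():
    assert standardize_score(7.5, 0, 10) == 75.0
    assert standardize_score(2, 2, 6) == 0.0
    assert standardize_score(6, 2, 6) == 100.0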
def scale_scores(csv_file_name):
df = | pd.read_csv(csv_file_name) | pandas.read_csv |
from datetime import datetime
from functools import partial
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from featuretools.entityset.relationship import RelationshipPath
from featuretools.exceptions import UnknownFeature
from featuretools.feature_base import (
AggregationFeature,
DirectFeature,
GroupByTransformFeature,
IdentityFeature,
TransformFeature
)
from featuretools.utils import Trie
from featuretools.utils.gen_utils import (
Library,
get_relationship_column_id,
import_or_none,
is_instance
)
ks = import_or_none('databricks.koalas')
class FeatureSetCalculator(object):
"""
Calculates the values of a set of features for given instance ids.
"""
def __init__(self, entityset, feature_set, time_last=None,
training_window=None, precalculated_features=None):
"""
Args:
feature_set (FeatureSet): The features to calculate values for.
time_last (pd.Timestamp, optional): Last allowed time. Data from exactly this
time not allowed.
training_window (Timedelta, optional): Window defining how much time before the cutoff time data
can be used when calculating features. If None, all data before cutoff time is used.
precalculated_features (Trie[RelationshipPath -> pd.DataFrame]):
Maps RelationshipPaths to dataframes of precalculated_features
"""
self.entityset = entityset
self.feature_set = feature_set
self.training_window = training_window
if time_last is None:
time_last = datetime.now()
self.time_last = time_last
if precalculated_features is None:
precalculated_features = Trie(path_constructor=RelationshipPath)
self.precalculated_features = precalculated_features
        # total number of features (including dependencies) to be calculated
self.num_features = sum(len(features1) + len(features2) for _, (_, features1, features2) in self.feature_set.feature_trie)
def run(self, instance_ids, progress_callback=None, include_cutoff_time=True):
"""
Calculate values of features for the given instances of the target
dataframe.
Summary of algorithm:
1. Construct a trie where the edges are relationships and each node
contains a set of features for a single dataframe. See
FeatureSet._build_feature_trie.
2. Initialize a trie for storing dataframes.
3. Traverse the trie using depth first search. At each node calculate
the features and store the resulting dataframe in the dataframe
trie (so that its values can be used by features which depend on
these features). See _calculate_features_for_dataframe.
4. Get the dataframe at the root of the trie (for the target dataframe) and
return the columns corresponding to the requested features.
Args:
instance_ids (np.ndarray or pd.Categorical): Instance ids for which
to build features.
progress_callback (callable): function to be called with incremental progress updates
include_cutoff_time (bool): If True, data at cutoff time are included
in calculating features.
Returns:
pd.DataFrame : Pandas DataFrame of calculated feature values.
Indexed by instance_ids. Columns in same order as features
passed in.
"""
assert len(instance_ids) > 0, "0 instance ids provided"
if progress_callback is None:
# do nothing for the progress call back if not provided
def progress_callback(*args):
pass
feature_trie = self.feature_set.feature_trie
df_trie = Trie(path_constructor=RelationshipPath)
full_dataframe_trie = Trie(path_constructor=RelationshipPath)
target_dataframe = self.entityset[self.feature_set.target_df_name]
self._calculate_features_for_dataframe(dataframe_name=self.feature_set.target_df_name,
feature_trie=feature_trie,
df_trie=df_trie,
full_dataframe_trie=full_dataframe_trie,
precalculated_trie=self.precalculated_features,
filter_column=target_dataframe.ww.index,
filter_values=instance_ids,
progress_callback=progress_callback,
include_cutoff_time=include_cutoff_time)
# The dataframe for the target dataframe should be stored at the root of
# df_trie.
df = df_trie.value
# Fill in empty rows with default values. This only works for pandas dataframes
# and is not currently supported for Dask dataframes.
if isinstance(df, pd.DataFrame):
index_dtype = df.index.dtype.name
if df.empty:
return self.generate_default_df(instance_ids=instance_ids)
missing_ids = [i for i in instance_ids if i not in
df[target_dataframe.ww.index]]
if missing_ids:
default_df = self.generate_default_df(instance_ids=missing_ids,
extra_columns=df.columns)
df = default_df.append(df, sort=True)
df.index.name = self.entityset[self.feature_set.target_df_name].ww.index
# Order by instance_ids
unique_instance_ids = pd.unique(instance_ids)
unique_instance_ids = unique_instance_ids.astype(instance_ids.dtype)
df = df.reindex(unique_instance_ids)
# Keep categorical index if original index was categorical
if index_dtype == 'category':
df.index = df.index.astype('category')
column_list = []
for feat in self.feature_set.target_features:
column_list.extend(feat.get_feature_names())
if is_instance(df, (dd, ks), 'DataFrame'):
column_list.extend([target_dataframe.ww.index])
return df[column_list]
def _calculate_features_for_dataframe(self, dataframe_name, feature_trie, df_trie,
full_dataframe_trie,
precalculated_trie,
filter_column, filter_values,
parent_data=None,
progress_callback=None,
include_cutoff_time=True):
"""
Generate dataframes with features calculated for this node of the trie,
and all descendant nodes. The dataframes will be stored in df_trie.
Args:
dataframe_name (str): The name of the dataframe to calculate features for.
feature_trie (Trie): the trie with sets of features to calculate.
The root contains features for the given dataframe.
df_trie (Trie): a parallel trie for storing dataframes. The
dataframe with features calculated will be placed in the root.
            full_dataframe_trie (Trie): a trie storing dataframes with all dataframe
                rows, for features that are uses_full_dataframe.
precalculated_trie (Trie): a parallel trie containing dataframes
with precalculated features. The dataframe specified by dataframe_name
will be at the root.
filter_column (str): The name of the column to filter this
dataframe by.
filter_values (pd.Series): The values to filter the filter_column
to.
parent_data (tuple[Relationship, list[str], pd.DataFrame]): Data
related to the parent of this trie. This will only be present if
the relationship points from this dataframe to the parent dataframe. A
3 tuple of (parent_relationship,
ancestor_relationship_columns, parent_df).
ancestor_relationship_columns is the names of columns which
link the parent dataframe to its ancestors.
include_cutoff_time (bool): If True, data at cutoff time are included
in calculating features.
"""
# Step 1: Get a dataframe for the given dataframe name, filtered by the given
# conditions.
need_full_dataframe, full_dataframe_features, not_full_dataframe_features = feature_trie.value
all_features = full_dataframe_features | not_full_dataframe_features
columns = self._necessary_columns(dataframe_name, all_features)
# If we need the full dataframe then don't filter by filter_values.
if need_full_dataframe:
query_column = None
query_values = None
else:
query_column = filter_column
query_values = filter_values
df = self.entityset.query_by_values(dataframe_name=dataframe_name,
instance_vals=query_values,
column_name=query_column,
columns=columns,
time_last=self.time_last,
training_window=self.training_window,
include_cutoff_time=include_cutoff_time)
# call to update timer
progress_callback(0)
# Step 2: Add columns to the dataframe linking it to all ancestors.
new_ancestor_relationship_columns = []
if parent_data:
parent_relationship, ancestor_relationship_columns, parent_df = \
parent_data
if ancestor_relationship_columns:
df, new_ancestor_relationship_columns = self._add_ancestor_relationship_columns(
df, parent_df, ancestor_relationship_columns, parent_relationship)
# Add the column linking this dataframe to its parent, so that
# descendants get linked to the parent.
new_ancestor_relationship_columns.append(parent_relationship._child_column_name)
# call to update timer
progress_callback(0)
# Step 3: Recurse on children.
# Pass filtered values, even if we are using a full df.
if need_full_dataframe:
if isinstance(filter_values, dd.Series):
msg = "Cannot use primitives that require full dataframe with Dask EntitySets"
raise ValueError(msg)
filtered_df = df[df[filter_column].isin(filter_values)]
else:
filtered_df = df
for edge, sub_trie in feature_trie.children():
is_forward, relationship = edge
if is_forward:
sub_dataframe_name = relationship.parent_dataframe.ww.name
sub_filter_column = relationship._parent_column_name
sub_filter_values = filtered_df[relationship._child_column_name]
parent_data = None
else:
sub_dataframe_name = relationship.child_dataframe.ww.name
sub_filter_column = relationship._child_column_name
sub_filter_values = filtered_df[relationship._parent_column_name]
parent_data = (relationship,
new_ancestor_relationship_columns,
df)
sub_df_trie = df_trie.get_node([edge])
sub_full_dataframe_trie = full_dataframe_trie.get_node([edge])
sub_precalc_trie = precalculated_trie.get_node([edge])
self._calculate_features_for_dataframe(
dataframe_name=sub_dataframe_name,
feature_trie=sub_trie,
df_trie=sub_df_trie,
full_dataframe_trie=sub_full_dataframe_trie,
precalculated_trie=sub_precalc_trie,
filter_column=sub_filter_column,
filter_values=sub_filter_values,
parent_data=parent_data,
progress_callback=progress_callback,
include_cutoff_time=include_cutoff_time)
# Step 4: Calculate the features for this dataframe.
#
# All dependencies of the features for this dataframe have been calculated
# by the above recursive calls, and their results stored in df_trie.
# Add any precalculated features.
precalculated_features_df = precalculated_trie.value
if precalculated_features_df is not None:
# Left outer merge to keep all rows of df.
df = df.merge(precalculated_features_df,
how='left',
left_index=True,
right_index=True,
suffixes=('', '_precalculated'))
# call to update timer
progress_callback(0)
# First, calculate any features that require the full dataframe. These can
# be calculated first because all of their dependents are included in
# full_dataframe_features.
if need_full_dataframe:
df = self._calculate_features(df, full_dataframe_trie, full_dataframe_features, progress_callback)
# Store full dataframe
full_dataframe_trie.value = df
# Filter df so that features that don't require the full dataframe are
# only calculated on the necessary instances.
df = df[df[filter_column].isin(filter_values)]
# Calculate all features that don't require the full dataframe.
df = self._calculate_features(df, df_trie, not_full_dataframe_features, progress_callback)
# Step 5: Store the dataframe for this dataframe at the root of df_trie, so
# that it can be accessed by the caller.
df_trie.value = df
def _calculate_features(self, df, df_trie, features, progress_callback):
# Group the features so that each group can be calculated together.
# The groups must also be in topological order (if A is a transform of B
# then B must be in a group before A).
feature_groups = self.feature_set.group_features(features)
for group in feature_groups:
representative_feature = group[0]
handler = self._feature_type_handler(representative_feature)
df = handler(group, df, df_trie, progress_callback)
return df
def _add_ancestor_relationship_columns(self, child_df, parent_df,
ancestor_relationship_columns,
relationship):
"""
Merge ancestor_relationship_columns from parent_df into child_df, adding a prefix to
each column name specifying the relationship.
Return the updated df and the new relationship column names.
Args:
child_df (pd.DataFrame): The dataframe to add relationship columns to.
parent_df (pd.DataFrame): The dataframe to copy relationship columns from.
ancestor_relationship_columns (list[str]): The names of
relationship columns in the parent_df to copy into child_df.
relationship (Relationship): the relationship through which the
child is connected to the parent.
"""
relationship_name = relationship.parent_name
new_relationship_columns = ['%s.%s' % (relationship_name, col)
for col in ancestor_relationship_columns]
# create an intermediate dataframe which shares a column
# with the child dataframe and has a column with the
# original parent's id.
col_map = {relationship._parent_column_name: relationship._child_column_name}
for child_column, parent_column in zip(new_relationship_columns, ancestor_relationship_columns):
col_map[parent_column] = child_column
merge_df = parent_df[list(col_map.keys())].rename(columns=col_map)
merge_df.index.name = None # change index name for merge
# Merge the dataframe, adding the relationship columns to the child.
# Left outer join so that all rows in child are kept (if it contains
# all rows of the dataframe then there may not be corresponding rows in the
# parent_df).
df = child_df.merge(merge_df,
how='left',
left_on=relationship._child_column_name,
right_on=relationship._child_column_name)
# ensure index is maintained
# TODO: Review for dask dataframes
if isinstance(df, pd.DataFrame):
df.set_index(relationship.child_dataframe.ww.index, drop=False, inplace=True)
return df, new_relationship_columns
def generate_default_df(self, instance_ids, extra_columns=None):
default_row = []
default_cols = []
for f in self.feature_set.target_features:
for name in f.get_feature_names():
default_cols.append(name)
default_row.append(f.default_value)
default_matrix = [default_row] * len(instance_ids)
default_df = pd.DataFrame(default_matrix,
columns=default_cols,
index=instance_ids)
index_name = self.entityset[self.feature_set.target_df_name].ww.index
default_df.index.name = index_name
if extra_columns is not None:
for c in extra_columns:
if c not in default_df.columns:
default_df[c] = [np.nan] * len(instance_ids)
return default_df
def _feature_type_handler(self, f):
if type(f) == TransformFeature:
return self._calculate_transform_features
elif type(f) == GroupByTransformFeature:
return self._calculate_groupby_features
elif type(f) == DirectFeature:
return self._calculate_direct_features
elif type(f) == AggregationFeature:
return self._calculate_agg_features
elif type(f) == IdentityFeature:
return self._calculate_identity_features
else:
raise UnknownFeature(u"{} feature unknown".format(f.__class__))
def _calculate_identity_features(self, features, df, _df_trie, progress_callback):
for f in features:
assert f.get_name() in df.columns, (
'Column "%s" missing frome dataframe' % f.get_name())
progress_callback(len(features) / float(self.num_features))
return df
def _calculate_transform_features(self, features, frame, _df_trie, progress_callback):
frame_empty = frame.empty if isinstance(frame, pd.DataFrame) else False
feature_values = []
for f in features:
# handle when no data
if frame_empty:
set_default_column(frame, f)
progress_callback(1 / float(self.num_features))
continue
# collect only the columns we need for this transformation
column_data = [frame[bf.get_name()]
for bf in f.base_features]
feature_func = f.get_function()
# apply the function to the relevant dataframe slice and add the
# feature row to the results dataframe.
if f.primitive.uses_calc_time:
values = feature_func(*column_data, time=self.time_last)
else:
values = feature_func(*column_data)
# if we don't get just the values, the assignment breaks when indexes don't match
if f.number_output_features > 1:
values = [strip_values_if_series(value) for value in values]
else:
values = [strip_values_if_series(values)]
feature_values.append((f, values))
progress_callback(1 / float(self.num_features))
frame = update_feature_columns(feature_values, frame)
return frame
def _calculate_groupby_features(self, features, frame, _df_trie, progress_callback):
for f in features:
set_default_column(frame, f)
# handle when no data
if frame.shape[0] == 0:
progress_callback(len(features) / float(self.num_features))
return frame
groupby = features[0].groupby.get_name()
grouped = frame.groupby(groupby)
        groups = frame[groupby].unique()  # get all the unique group names to iterate over later
for f in features:
feature_vals = []
for _ in range(f.number_output_features):
feature_vals.append([])
for group in groups:
# skip null key if it exists
if pd.isnull(group):
continue
column_names = [bf.get_name() for bf in f.base_features]
# exclude the groupby column from being passed to the function
column_data = [grouped[name].get_group(group) for name in column_names[:-1]]
feature_func = f.get_function()
# apply the function to the relevant dataframe slice and add the
# feature row to the results dataframe.
if f.primitive.uses_calc_time:
values = feature_func(*column_data, time=self.time_last)
else:
values = feature_func(*column_data)
if f.number_output_features == 1:
values = [values]
# make sure index is aligned
for i, value in enumerate(values):
if isinstance(value, pd.Series):
value.index = column_data[0].index
else:
value = pd.Series(value, index=column_data[0].index)
feature_vals[i].append(value)
if any(feature_vals):
assert len(feature_vals) == len(f.get_feature_names())
for col_vals, name in zip(feature_vals, f.get_feature_names()):
frame[name].update( | pd.concat(col_vals) | pandas.concat |
"""Python调用天软的封装"""
import sys
sys.path.append(r"D:\programs\Analyse.NET")
import pandas as pd
import TSLPy3 as tsl
import os
from FactorLib.utils.tool_funcs import tradecode_to_tslcode, tslcode_to_tradecode
from FactorLib.utils.datetime_func import DateRange2Dates
from FactorLib.utils.TSDataParser import *
from functools import reduce, partial
_ashare = "'上证A股;深证A股;创业板;中小企业板;科创板;暂停上市;终止上市'"
_fund = "'上证基金;深证基金;开放式基金'"
_condition = 'firstday()<=getsysparam(pn_date())'
def _gstr_from_func(func_name, func_args):
func_str = "data := {func_name}({args}); return data;".format(func_name=func_name, args=",".join(func_args))
return func_str
def encode_datetime(dt):
return tsl.EncodeDateTime(dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second, 0)
def decode_date(dt):
dt_decode = tsl.DecodeDate(dt)
return pd.Timestamp(*dt_decode)
def run_script(script, sysparams):
data = tsl.RemoteExecute(script, sysparams)
return data
def run_function(func_name, *args, **kwargs):
"""
调用天软任意函数
Parameters:
func_name: str
函数名称
args:
函数参数列表
字符串类型的参数需要加单引号,例如"'SH600001'"
kwargs:
天软系统参数
"""
script_str = _gstr_from_func(func_name, args)
return run_script(script_str, kwargs)
def CsQuery(field_dict, end_date, bk_name=_ashare, stock_list=None, condition="1",
code_transfer=True, **kwargs):
"""对天软Query函数的封装
Parameters:
===========
field_dict: dict
计算字段{字段名称: 函数名称},字段名称需自加单引号。
例:{"'IDs'": 'DefaultStockID()'}
end_daet: str
截面日期
bk_name: str
天软板块名称,以分号分割。
stock_list: list
股票列表,默认的格式是6位代码
condition: str
天软Csquery参数
code_transfer: bool
是否要将股票列表的代码转为天软格式。如果stock_list中代码格式
没有后缀,那code_transfer需为True。
kwargs: dict
天软系统参数
"""
field_dict.update({"'IDs'": 'DefaultStockID()'})
if stock_list is None:
stock_list = "''"
else:
if code_transfer:
stock_list = "'%s'" % ";".join(map(tradecode_to_tslcode, stock_list))
else:
stock_list = "'%s'" % ";".join(stock_list)
if (end_date.hour == 0) and (end_date.minute == 0) and (end_date.second == 0):
encode_date = tsl.EncodeDate(end_date.year, end_date.month, end_date.day)
else:
encode_date = tsl.EncodeDateTime(end_date.year, end_date.month, end_date.day,
end_date.hour, end_date.minute, end_date.second, 0)
func_name = "Query"
func_args = [bk_name, stock_list, condition, "''"] + list(reduce(lambda x, y: x+y, field_dict.items()))
script_str = _gstr_from_func(func_name, func_args)
sysparams = {'CurrentDate': encode_date}
sysparams.update(kwargs)
data = tsl.RemoteExecute(script_str, sysparams)
df = parse2DArray(data, column_decode=['IDs'])
df['IDs'] = df['IDs'].apply(tslcode_to_tradecode)
df['date'] = end_date
return df.set_index(['date', 'IDs'])
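# Illustrative usage sketch for CsQuery. The field expression 'close()' is
# assumed to be a valid TSL expression here; substitute real TinySoft factor
# expressions (and a reachable Analyse.NET install) before relying on it.
def _demo_cs_query():
    trade_date = pd.Timestamp('2021-01-29')
    fields = {"'close'": 'close()'}
    return CsQuery(fields, trade_date, stock_list=['600000', '000001'])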
def TsQuery(field_dict, dates, stock, code_transfer=True, **kwargs):
"""
    TinySoft time-series query function (wrapper around TSL's Nday)
"""
field_dict.update({"'date'": 'DateTimeToStr(sp_time())', "'IDs'": 'DefaultStockID()'})
if code_transfer:
stock = tradecode_to_tslcode(stock)
N = len(dates)
func_args = [str(N)] + list(reduce(lambda x, y: x+y, field_dict.items()))
func_name = "Nday"
script_str = _gstr_from_func(func_name, func_args)
end_date = max(dates)
if (end_date.hour == 0) and (end_date.minute == 0) and (end_date.second == 0):
encode_date = tsl.EncodeDate(end_date.year, end_date.month, end_date.day)
else:
encode_date = tsl.EncodeDateTime(end_date.year, end_date.month, end_date.day,
end_date.hour, end_date.minute, end_date.second, 0)
sysparams = {'CurrentDate': encode_date, 'StockID': stock}
sysparams.update(kwargs)
data = tsl.RemoteExecute(script_str, sysparams)
df = parse2DArray(data, column_decode=['IDs', 'date'])
df['IDs'] = df['IDs'].apply(tslcode_to_tradecode)
df['date'] = | pd.DatetimeIndex(df['date']) | pandas.DatetimeIndex |
"""
https://www.kaggle.com/weicongkong/feedback-prize-huggingface-baseline-training/edit
Copyright (C) <NAME>, 23/02/2022
"""
# %% [markdown]
# # HuggingFace Training Baseline
#
# I wanted to create my own baseline for this competition, and I tried to do so "without peeking" at the kernels published by others. Ideally this can be used for training on a Kaggle kernel. Let's see how good we can get.
#
# This baseline is based on the following notebook by <NAME>: https://github.com/huggingface/notebooks/blob/master/examples/token_classification.ipynb
#
# I initially started building with Roberta - thanks to <NAME> for pointing to Longformer :) The evaluation code is from <NAME>.
#
# The notebook requires a couple of hours to run, so we'll use W&B to be able to monitor it along the way and keep the record of our experiments.
# %% [markdown]
# ## Setup
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T22:59:40.43361Z","iopub.execute_input":"2021-12-23T22:59:40.434Z","iopub.status.idle":"2021-12-23T22:59:40.438896Z","shell.execute_reply.started":"2021-12-23T22:59:40.433966Z","shell.execute_reply":"2021-12-23T22:59:40.437857Z"}}
SAMPLE = True # set True for debugging
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:00.094757Z","iopub.execute_input":"2021-12-23T23:00:00.095189Z","iopub.status.idle":"2021-12-23T23:00:08.865381Z","shell.execute_reply.started":"2021-12-23T23:00:00.095139Z","shell.execute_reply":"2021-12-23T23:00:08.86421Z"}}
# setup wandb for experiment tracking
# source: https://www.kaggle.com/debarshichanda/pytorch-w-b-jigsaw-starter
import wandb
wandb.login(key='<KEY>')
wandb.init(project="feedback_prize", entity="wilsonkong")
anony = None
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:08.872471Z","iopub.execute_input":"2021-12-23T23:00:08.875384Z","iopub.status.idle":"2021-12-23T23:00:09.613866Z","shell.execute_reply.started":"2021-12-23T23:00:08.875328Z","shell.execute_reply":"2021-12-23T23:00:09.612856Z"}}
# CONFIG
EXP_NUM = 4
task = "ner"
model_checkpoint = "allenai/longformer-base-4096"
max_length = 1024
stride = 128
min_tokens = 6
model_path = f'{model_checkpoint.split("/")[-1]}-{EXP_NUM}'
# TRAINING HYPERPARAMS
BS = 1
GRAD_ACC = 8
LR = 5e-5
WD = 0.01
WARMUP = 0.1
N_EPOCHS = 5
# %% [markdown]
# ## Data Preprocessing
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:09.615125Z","iopub.execute_input":"2021-12-23T23:00:09.615508Z","iopub.status.idle":"2021-12-23T23:00:11.240349Z","shell.execute_reply.started":"2021-12-23T23:00:09.615458Z","shell.execute_reply":"2021-12-23T23:00:11.239275Z"}}
import pandas as pd
import os
pd.options.display.width = 500
pd.options.display.max_columns = 20
# read train data
DATA_ROOT = r"C:\Users\wkong\IdeaProjects\kaggle_data\feedback-prize-2021"
train = pd.read_csv(os.path.join(DATA_ROOT, "train.csv"))
train.head(1)
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:11.245598Z","iopub.execute_input":"2021-12-23T23:00:11.248663Z","iopub.status.idle":"2021-12-23T23:00:12.088646Z","shell.execute_reply.started":"2021-12-23T23:00:11.248611Z","shell.execute_reply":"2021-12-23T23:00:12.087709Z"}}
# check unique classes
classes = train.discourse_type.unique().tolist()
classes
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:12.090074Z","iopub.execute_input":"2021-12-23T23:00:12.090401Z","iopub.status.idle":"2021-12-23T23:00:12.909927Z","shell.execute_reply.started":"2021-12-23T23:00:12.090357Z","shell.execute_reply":"2021-12-23T23:00:12.908979Z"}}
# setup label indices
from collections import defaultdict
tags = defaultdict()
for i, c in enumerate(classes):
tags[f'B-{c}'] = i
tags[f'I-{c}'] = i + len(classes)
tags[f'O'] = len(classes) * 2
tags[f'Special'] = -100
l2i = dict(tags)
i2l = defaultdict()
for k, v in l2i.items():
i2l[v] = k
i2l[-100] = 'Special'
i2l = dict(i2l)
N_LABELS = len(i2l) - 1 # not accounting for -100
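# Each discourse type contributes one B- tag and one I- tag, plus a single shared
# 'O' tag, so the label count is always 2 * n_classes + 1 (the -100 'Special' id
# only masks special/overflow tokens in the loss and is not counted).
assert N_LABELS == 2 * len(classes) + 1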
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:12.913651Z","iopub.execute_input":"2021-12-23T23:00:12.913893Z","iopub.status.idle":"2021-12-23T23:00:13.630498Z","shell.execute_reply.started":"2021-12-23T23:00:12.913861Z","shell.execute_reply":"2021-12-23T23:00:13.629554Z"}}
# some helper functions
from pathlib import Path
path = Path(os.path.join(DATA_ROOT, 'train'))
def get_raw_text(ids):
with open(path / f'{ids}.txt', 'r') as file: data = file.read()
return data
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:13.634902Z","iopub.execute_input":"2021-12-23T23:00:13.635138Z","iopub.status.idle":"2021-12-23T23:00:24.829274Z","shell.execute_reply.started":"2021-12-23T23:00:13.635107Z","shell.execute_reply":"2021-12-23T23:00:24.828189Z"}}
# group training labels by text file
df1 = train.groupby('id')['discourse_type'].apply(list).reset_index(name='classlist')
df2 = train.groupby('id')['discourse_start'].apply(list).reset_index(name='starts')
df3 = train.groupby('id')['discourse_end'].apply(list).reset_index(name='ends')
df4 = train.groupby('id')['predictionstring'].apply(list).reset_index(name='predictionstrings')
df = pd.merge(df1, df2, how='inner', on='id')
df = pd.merge(df, df3, how='inner', on='id')
df = pd.merge(df, df4, how='inner', on='id')
df['text'] = df['id'].apply(get_raw_text)
df.head()
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:24.831063Z","iopub.execute_input":"2021-12-23T23:00:24.831421Z","iopub.status.idle":"2021-12-23T23:00:25.596595Z","shell.execute_reply.started":"2021-12-23T23:00:24.831375Z","shell.execute_reply":"2021-12-23T23:00:25.595633Z"}}
# debugging
if SAMPLE: df = df.sample(n=100).reset_index(drop=True)
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:25.59961Z","iopub.execute_input":"2021-12-23T23:00:25.600322Z","iopub.status.idle":"2021-12-23T23:00:26.415085Z","shell.execute_reply.started":"2021-12-23T23:00:25.600259Z","shell.execute_reply":"2021-12-23T23:00:26.413987Z"}}
# we will use HuggingFace datasets
from datasets import Dataset, load_metric
ds = Dataset.from_pandas(df)
datasets = ds.train_test_split(test_size=0.1, shuffle=True, seed=42)
datasets
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:26.416852Z","iopub.execute_input":"2021-12-23T23:00:26.417192Z","iopub.status.idle":"2021-12-23T23:00:31.722501Z","shell.execute_reply.started":"2021-12-23T23:00:26.417127Z","shell.execute_reply":"2021-12-23T23:00:31.721572Z"}}
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, add_prefix_space=True)
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:31.724112Z","iopub.execute_input":"2021-12-23T23:00:31.724482Z","iopub.status.idle":"2021-12-23T23:00:32.494243Z","shell.execute_reply.started":"2021-12-23T23:00:31.724438Z","shell.execute_reply":"2021-12-23T23:00:32.49297Z"}}
# Not sure if this is needed, but in case we create a span with certain class without starting token of that class,
# let's convert the first token to be the starting token.
e = [0, 7, 7, 7, 1, 1, 8, 8, 8, 9, 9, 9, 14, 4, 4, 4]
def fix_beginnings(labels):
for i in range(1, len(labels)):
curr_lab = labels[i]
prev_lab = labels[i - 1]
if curr_lab in range(7, 14):
if prev_lab != curr_lab and prev_lab != curr_lab - 7:
labels[i] = curr_lab - 7
return labels
fix_beginnings(e)
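# Sanity check of fix_beginnings (it hardcodes ids 7-13 as I-tags, i.e. a
# 7-class setup): an I-tag that opens a new span right after a different class
# (the 9 following an 8 below) is rewritten to its matching B-tag (2).
assert fix_beginnings([0, 7, 7, 7, 1, 1, 8, 8, 8, 9, 9, 9, 14, 4, 4, 4]) == [
    0, 7, 7, 7, 1, 1, 8, 8, 8, 2, 9, 9, 14, 4, 4, 4]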
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:32.495836Z","iopub.execute_input":"2021-12-23T23:00:32.496208Z","iopub.status.idle":"2021-12-23T23:00:33.263669Z","shell.execute_reply.started":"2021-12-23T23:00:32.49614Z","shell.execute_reply":"2021-12-23T23:00:33.262629Z"}}
# tokenize and add labels
def tokenize_and_align_labels(examples):
o = tokenizer(examples['text'], truncation=True, padding=True, return_offsets_mapping=True, max_length=max_length,
stride=stride, return_overflowing_tokens=True)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = o["overflow_to_sample_mapping"]
# The offset mappings will give us a map from token to character position in the original context. This will
# help us compute the start_positions and end_positions.
offset_mapping = o["offset_mapping"]
o["labels"] = []
for i in range(len(offset_mapping)):
sample_index = sample_mapping[i]
        labels = [l2i['O'] for _ in range(len(o['input_ids'][i]))]
for label_start, label_end, label in \
list(zip(examples['starts'][sample_index], examples['ends'][sample_index],
examples['classlist'][sample_index])):
for j in range(len(labels)):
token_start = offset_mapping[i][j][0]
token_end = offset_mapping[i][j][1]
if token_start == label_start:
labels[j] = l2i[f'B-{label}']
if token_start > label_start and token_end <= label_end:
labels[j] = l2i[f'I-{label}']
for k, input_id in enumerate(o['input_ids'][i]):
            # mask RoBERTa/Longformer special tokens (<s>=0, <pad>=1, </s>=2)
            if input_id in [0, 1, 2]:
labels[k] = -100
labels = fix_beginnings(labels)
o["labels"].append(labels)
return o
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:33.265142Z","iopub.execute_input":"2021-12-23T23:00:33.265646Z","iopub.status.idle":"2021-12-23T23:00:35.856612Z","shell.execute_reply.started":"2021-12-23T23:00:33.265601Z","shell.execute_reply":"2021-12-23T23:00:35.855589Z"}}
tokenized_datasets = datasets.map(tokenize_and_align_labels, batched=True, \
batch_size=20000, remove_columns=datasets["train"].column_names)
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:35.858326Z","iopub.execute_input":"2021-12-23T23:00:35.858635Z","iopub.status.idle":"2021-12-23T23:00:36.592654Z","shell.execute_reply.started":"2021-12-23T23:00:35.85859Z","shell.execute_reply":"2021-12-23T23:00:36.591606Z"}}
tokenized_datasets
# %% [markdown]
# ## Model and Training
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:36.59433Z","iopub.execute_input":"2021-12-23T23:00:36.594634Z","iopub.status.idle":"2021-12-23T23:00:40.685632Z","shell.execute_reply.started":"2021-12-23T23:00:36.594593Z","shell.execute_reply":"2021-12-23T23:00:40.684693Z"}}
# we will use auto model for token classification
from transformers import AutoModelForTokenClassification, TrainingArguments, Trainer
model = AutoModelForTokenClassification.from_pretrained(model_checkpoint, num_labels=N_LABELS)
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:40.690854Z","iopub.execute_input":"2021-12-23T23:00:40.693718Z","iopub.status.idle":"2021-12-23T23:00:41.535273Z","shell.execute_reply.started":"2021-12-23T23:00:40.693672Z","shell.execute_reply":"2021-12-23T23:00:41.534215Z"}}
model_name = model_checkpoint.split("/")[-1]
args = TrainingArguments(
f"{model_name}-finetuned-{task}",
evaluation_strategy="epoch",
logging_strategy="epoch",
save_strategy="epoch",
learning_rate=LR,
per_device_train_batch_size=BS,
per_device_eval_batch_size=BS,
num_train_epochs=N_EPOCHS,
weight_decay=WD,
report_to='wandb',
gradient_accumulation_steps=GRAD_ACC,
warmup_ratio=WARMUP
)
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:41.53676Z","iopub.execute_input":"2021-12-23T23:00:41.537608Z","iopub.status.idle":"2021-12-23T23:00:42.282789Z","shell.execute_reply.started":"2021-12-23T23:00:41.537572Z","shell.execute_reply":"2021-12-23T23:00:42.281853Z"}}
from transformers import DataCollatorForTokenClassification
data_collator = DataCollatorForTokenClassification(tokenizer)
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:42.284192Z","iopub.execute_input":"2021-12-23T23:00:42.284501Z","iopub.status.idle":"2021-12-23T23:00:43.656933Z","shell.execute_reply.started":"2021-12-23T23:00:42.284458Z","shell.execute_reply":"2021-12-23T23:00:43.655937Z"}}
# this is not the competition metric, but for now this will be better than nothing...
metric = load_metric("seqeval")
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:43.658571Z","iopub.execute_input":"2021-12-23T23:00:43.658881Z","iopub.status.idle":"2021-12-23T23:00:44.386693Z","shell.execute_reply.started":"2021-12-23T23:00:43.658824Z","shell.execute_reply":"2021-12-23T23:00:44.385607Z"}}
import numpy as np
def compute_metrics(p):
predictions, labels = p
predictions = np.argmax(predictions, axis=2)
# Remove ignored index (special tokens)
true_predictions = [
[i2l[p] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(predictions, labels)
]
true_labels = [
[i2l[l] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(predictions, labels)
]
results = metric.compute(predictions=true_predictions, references=true_labels)
return {
"precision": results["overall_precision"],
"recall": results["overall_recall"],
"f1": results["overall_f1"],
"accuracy": results["overall_accuracy"],
}
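# A hedged sketch of what metric.compute(...) returns for the "seqeval" metric (numbers made up):
#   {"Claim": {"precision": 0.61, "recall": 0.55, "f1": 0.58, "number": 123},
#    ...,
#    "overall_precision": 0.60, "overall_recall": 0.57, "overall_f1": 0.58, "overall_accuracy": 0.71}
# Only the overall_* keys are used in compute_metrics above.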
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:44.388421Z","iopub.execute_input":"2021-12-23T23:00:44.388744Z","iopub.status.idle":"2021-12-23T23:00:45.313179Z","shell.execute_reply.started":"2021-12-23T23:00:44.38869Z","shell.execute_reply":"2021-12-23T23:00:45.312215Z"}}
trainer = Trainer(
model,
args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["test"],
data_collator=data_collator,
tokenizer=tokenizer,
compute_metrics=compute_metrics,
)
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:00:45.314663Z","iopub.execute_input":"2021-12-23T23:00:45.318411Z","iopub.status.idle":"2021-12-23T23:03:13.651205Z","shell.execute_reply.started":"2021-12-23T23:00:45.318345Z","shell.execute_reply":"2021-12-23T23:03:13.650259Z"}}
trainer.train()
wandb.finish()
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:03:13.656546Z","iopub.execute_input":"2021-12-23T23:03:13.656788Z","iopub.status.idle":"2021-12-23T23:03:15.317965Z","shell.execute_reply.started":"2021-12-23T23:03:13.656757Z","shell.execute_reply":"2021-12-23T23:03:15.316868Z"}}
trainer.save_model(model_path)
# %% [markdown]
# ## Validation
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:03:15.31952Z","iopub.execute_input":"2021-12-23T23:03:15.319834Z","iopub.status.idle":"2021-12-23T23:03:15.332639Z","shell.execute_reply.started":"2021-12-23T23:03:15.319782Z","shell.execute_reply":"2021-12-23T23:03:15.331235Z"}}
def tokenize_for_validation(examples):
o = tokenizer(examples['text'], truncation=True, return_offsets_mapping=True, max_length=4096)
# The offset mappings will give us a map from token to character position in the original context. This will
# help us compute the start_positions and end_positions.
offset_mapping = o["offset_mapping"]
o["labels"] = []
for i in range(len(offset_mapping)):
        labels = [l2i['O'] for _ in range(len(o['input_ids'][i]))]
for label_start, label_end, label in \
list(zip(examples['starts'][i], examples['ends'][i], examples['classlist'][i])):
for j in range(len(labels)):
token_start = offset_mapping[i][j][0]
token_end = offset_mapping[i][j][1]
if token_start == label_start:
labels[j] = l2i[f'B-{label}']
if token_start > label_start and token_end <= label_end:
labels[j] = l2i[f'I-{label}']
for k, input_id in enumerate(o['input_ids'][i]):
if input_id in [0, 1, 2]:
labels[k] = -100
labels = fix_beginnings(labels)
o["labels"].append(labels)
return o
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:03:15.334494Z","iopub.execute_input":"2021-12-23T23:03:15.335669Z","iopub.status.idle":"2021-12-23T23:03:16.652272Z","shell.execute_reply.started":"2021-12-23T23:03:15.335596Z","shell.execute_reply":"2021-12-23T23:03:16.651209Z"}}
tokenized_val = datasets.map(tokenize_for_validation, batched=True)
tokenized_val
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:03:16.654017Z","iopub.execute_input":"2021-12-23T23:03:16.654625Z","iopub.status.idle":"2021-12-23T23:03:16.711036Z","shell.execute_reply.started":"2021-12-23T23:03:16.654567Z","shell.execute_reply":"2021-12-23T23:03:16.710012Z"}}
# ground truth for validation
l = []
for example in tokenized_val['test']:
for c, p in list(zip(example['classlist'], example['predictionstrings'])):
l.append({
'id': example['id'],
'discourse_type': c,
'predictionstring': p,
})
gt_df = pd.DataFrame(l)
gt_df
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:03:16.712458Z","iopub.execute_input":"2021-12-23T23:03:16.713221Z","iopub.status.idle":"2021-12-23T23:03:16.719502Z","shell.execute_reply.started":"2021-12-23T23:03:16.713168Z","shell.execute_reply":"2021-12-23T23:03:16.718212Z"}}
# visualization with displacy
import pandas as pd
import os
from pathlib import Path
import spacy
from spacy import displacy
from pylab import cm, matplotlib
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:03:16.721142Z","iopub.execute_input":"2021-12-23T23:03:16.721798Z","iopub.status.idle":"2021-12-23T23:03:16.733508Z","shell.execute_reply.started":"2021-12-23T23:03:16.721753Z","shell.execute_reply":"2021-12-23T23:03:16.732443Z"}}
path = Path(os.path.join(DATA_ROOT, 'train'))
colors = {
'Lead': '#8000ff',
'Position': '#2b7ff6',
'Evidence': '#2adddd',
'Claim': '#80ffb4',
    'Concluding Statement': '#d4dd80',
'Counterclaim': '#ff8042',
'Rebuttal': '#ff0000',
'Other': '#007f00',
}
def visualize(df, text):
ents = []
example = df['id'].loc[0]
for i, row in df.iterrows():
ents.append({
'start': int(row['discourse_start']),
'end': int(row['discourse_end']),
'label': row['discourse_type']
})
doc2 = {
"text": text,
"ents": ents,
"title": example
}
options = {"ents": train.discourse_type.unique().tolist() + ['Other'], "colors": colors}
displacy.render(doc2, style="ent", options=options, manual=True, jupyter=True)
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:03:16.735115Z","iopub.execute_input":"2021-12-23T23:03:16.736247Z","iopub.status.idle":"2021-12-23T23:03:17.621012Z","shell.execute_reply.started":"2021-12-23T23:03:16.736199Z","shell.execute_reply":"2021-12-23T23:03:17.619921Z"}}
predictions, labels, _ = trainer.predict(tokenized_val['test'])
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:03:17.622787Z","iopub.execute_input":"2021-12-23T23:03:17.623357Z","iopub.status.idle":"2021-12-23T23:03:17.632659Z","shell.execute_reply.started":"2021-12-23T23:03:17.623297Z","shell.execute_reply":"2021-12-23T23:03:17.631425Z"}}
preds = np.argmax(predictions, axis=-1)
preds.shape
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:03:17.634765Z","iopub.execute_input":"2021-12-23T23:03:17.63535Z","iopub.status.idle":"2021-12-23T23:03:17.655065Z","shell.execute_reply.started":"2021-12-23T23:03:17.635228Z","shell.execute_reply":"2021-12-23T23:03:17.653955Z"}}
# code that converts our predictions into prediction strings and visualizes them at the same time
# this most likely requires some refactoring
def get_class(c):
if c == 14:
return 'Other'
else:
return i2l[c][2:]
def pred2span(pred, example, viz=False, test=False):
example_id = example['id']
n_tokens = len(example['input_ids'])
classes = []
all_span = []
for i, c in enumerate(pred.tolist()):
if i == n_tokens - 1:
break
if i == 0:
cur_span = example['offset_mapping'][i]
classes.append(get_class(c))
elif i > 0 and (c == pred[i - 1] or (c - 7) == pred[i - 1]):
cur_span[1] = example['offset_mapping'][i][1]
else:
all_span.append(cur_span)
cur_span = example['offset_mapping'][i]
classes.append(get_class(c))
all_span.append(cur_span)
if test:
text = get_test_text(example_id)
else:
text = get_raw_text(example_id)
# map token ids to word (whitespace) token ids
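    # Worked example (derived from the loop below): with text = "I like to eat pizza" and a
    # character span (7, 12) covering "to eat", before = "I like " contains 2 whitespace tokens,
    # so token_start = 2, num_tkns = 2 and the predictionstring becomes "2 3"
    # (the word indices of "to" and "eat").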
predstrings = []
for span in all_span:
span_start = span[0]
span_end = span[1]
before = text[:span_start]
token_start = len(before.split())
if len(before) == 0:
token_start = 0
elif before[-1] != ' ':
token_start -= 1
num_tkns = len(text[span_start:span_end + 1].split())
tkns = [str(x) for x in range(token_start, token_start + num_tkns)]
predstring = ' '.join(tkns)
predstrings.append(predstring)
rows = []
for c, span, predstring in zip(classes, all_span, predstrings):
e = {
'id': example_id,
'discourse_type': c,
'predictionstring': predstring,
'discourse_start': span[0],
'discourse_end': span[1],
'discourse': text[span[0]:span[1] + 1]
}
rows.append(e)
df = pd.DataFrame(rows)
df['length'] = df['discourse'].apply(lambda t: len(t.split()))
    # Short spans are likely to be false positives; we can choose a minimum number of tokens (min_tokens) based on validation
df = df[df.length > min_tokens].reset_index(drop=True)
if viz: visualize(df, text)
return df
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:03:17.658868Z","iopub.execute_input":"2021-12-23T23:03:17.659221Z","iopub.status.idle":"2021-12-23T23:03:17.712976Z","shell.execute_reply.started":"2021-12-23T23:03:17.659184Z","shell.execute_reply":"2021-12-23T23:03:17.711747Z"}}
pred2span(preds[0], tokenized_val['test'][0], viz=True)
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:03:17.71609Z","iopub.execute_input":"2021-12-23T23:03:17.716626Z","iopub.status.idle":"2021-12-23T23:03:17.757272Z","shell.execute_reply.started":"2021-12-23T23:03:17.716588Z","shell.execute_reply":"2021-12-23T23:03:17.756227Z"}}
pred2span(preds[1], tokenized_val['test'][1], viz=True)
# %% [code] {"execution":{"iopub.status.busy":"2021-12-23T23:03:17.759337Z","iopub.execute_input":"2021-12-23T23:03:17.760071Z","iopub.status.idle":"2021-12-23T23:03:17.883329Z","shell.execute_reply.started":"2021-12-23T23:03:17.760003Z","shell.execute_reply":"2021-12-23T23:03:17.8822Z"}}
dfs = []
for i in range(len(tokenized_val['test'])):
dfs.append(pred2span(preds[i], tokenized_val['test'][i]))
pred_df = | pd.concat(dfs, axis=0) | pandas.concat |
import pandas as pd
import numpy as np
from functools import reduce
class DFLoader(object):
def __init__(self, url_list):
self.url_list = url_list
def load(self):
data_frames = []
for key, value in self.url_list.items():
df = pd.read_csv(value, error_bad_lines=False)
df_long = df.melt(id_vars = ["Province/State", "Country/Region", "Lat", "Long"],\
var_name="Date", value_name=key)
df_long["Date"] = pd.to_datetime(df_long["Date"])
data_frames.append(df_long)
df_merged = reduce(lambda left,right: pd.merge(left, right, on=["Date","Province/State", "Country/Region", "Lat", "Long"],
how='outer'), data_frames)
min_val = df_merged["Confirmed"].min()
max_val = df_merged["Confirmed"].max()
df_merged["ConfNorm"] = (df_merged["Confirmed"]-min_val)/(max_val-min_val)
df_merged["DeathNorm"] = (df_merged["Deaths"]-min_val)/(max_val-min_val)
df_merged["RecoverNorm"] = (df_merged["Recovered"]-min_val)/(max_val-min_val)
last_date = df_merged.Date.unique()[-1]
max_last_day = df_merged.loc[df_merged["Date"]==last_date, "Confirmed"].sum()
return df_merged, max_last_day
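# A minimal usage sketch (the URLs are assumptions; any CSVs with the JHU CSSE time-series layout of
# "Province/State", "Country/Region", "Lat", "Long" plus one column per date will work, and the dict
# keys must match the column names used above):
# urls = {
#     "Confirmed": "https://example.com/time_series_covid19_confirmed_global.csv",
#     "Deaths": "https://example.com/time_series_covid19_deaths_global.csv",
#     "Recovered": "https://example.com/time_series_covid19_recovered_global.csv",
# }
# df_merged, max_last_day = DFLoader(urls).load()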
class DFLoaderNew:
def __init__(self, url_list):
self.df_global = pd.read_csv(url_list['world'])
self.df_us = pd.read_csv(url_list['us'])
self._fixUS() # fix US
def load(self):
df_all = | pd.concat([self.df_us, self.df_global]) | pandas.concat |
from scipy import stats
import numpy as np
import pandas as pd
class MM1:
"""
A simple simulation method for MM1 queues
- MM1.get_IAT_and_ST: generates lists of IAT's and ST's drawn from exponential distributions
- MM1.calculate: values for AT, TSB, TSE, TCSS, TCWG and ITS are calculated per arrival
- MM1.get_stats: print basic statistics based on the simulation results
"""
def __init__(self, lam, mu, nr_arr, seed=None):
"""
        Initialization. The basic time unit is hours.
        """
        # todo: consider removing the seed argument
self.lam = lam # arrivals per hour
self.mu = mu # departures per hour
self.nr_arr = nr_arr # nr of customers
np.random.seed(seed)
def get_IAT_and_ST(self): # generate list of inter arrival times
"""
Generate lists of IAT's and ST's drawn from exponential distributions.
"""
rv_iat = stats.expon(scale=1 / self.lam)
rv_st = stats.expon(scale=1 / self.mu)
# generate list of inter arrival times
IAT = rv_iat.rvs(self.nr_arr)
# generate list of service times
ST = rv_st.rvs(self.nr_arr)
return IAT, ST
def calculate(self, IAT, ST):
"""
Values for AT, TSB, TSE, TCSS, TCWG and ITS are calculated per arrival
"""
df_cust = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 24 10:37:23 2020
@author: luisr
"""
from bs4 import BeautifulSoup as bs
import requests as req
import pandas as pd
import time
import cssutils
result_df = | pd.DataFrame(columns=['link', 'pagina']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, | StringIO(data) | pandas.compat.StringIO |
import pandas as pd
import numpy as np
import re as re
from base import Feature, get_arguments, generate_features
Feature.dir = 'features'
# """sample usage
# """
# class Pclass(Feature):
# def create_features(self):
# self.train['Pclass'] = train['Pclass']
# self.test['Pclass'] = test['Pclass']
class Year(Feature):
def create_features(self):
self.train["year"] = pd.to_datetime(train["publishedAt"]).dt.year
self.test["year"] = pd.to_datetime(test["publishedAt"]).dt.year
class Month(Feature):
def create_features(self):
self.train["month"] = pd.to_datetime(train["publishedAt"]).dt.month
self.test["month"] = pd.to_datetime(test["publishedAt"]).dt.month
class Day(Feature):
def create_features(self):
self.train["day"] = pd.to_datetime(train["publishedAt"]).dt.day
self.test["day"] = pd.to_datetime(test["publishedAt"]).dt.day
class Hour(Feature):
def create_features(self):
self.train["hour"] = pd.to_datetime(train["publishedAt"]).dt.hour
self.test["hour"] = pd.to_datetime(test["publishedAt"]).dt.hour
class Minute(Feature):
def create_features(self):
self.train["minute"] = | pd.to_datetime(train["publishedAt"]) | pandas.to_datetime |
#Import dependancies
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import requests
import json
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
combined_data = | pd.read_excel('2010-2019combineddata.xls') | pandas.read_excel |
import pandas as pd
import numpy as np
import csv
from tqdm import trange
def clean(file_name,targets=['11612','11613']):
data = pd.read_csv(file_name)
data['result'].fillna(0,inplace=True)
data['result'] = data['result'].astype(int)
items = pd.unique(data['item_id'].values).tolist()
item2id = {itm:i for i,itm in enumerate(items)}
target_ids = [item2id[t] for t in targets]
data['item_id'] = data['item_id'].map(item2id)
data = data.loc[data['rn']==1,:]
log_fo = open('log.txt','w',encoding='utf8')
users = | pd.unique(data['uid'].values) | pandas.unique |
#!/usr/bin/env python
import numpy as np
from scipy import stats
try:
import pickle as pickle
except:
import cPickle as pickle
import gzip
import gibbs
import argparse
import sys, util
import pandas as pd
def SDPRX_gibbs(beta_margin1, beta_margin2, N1, N2, rho, idx1_shared, idx2_shared, ld_boundaries1, ld_boundaries2, ref_ld_mat1, ref_ld_mat2, mcmc_samples,
burn, max_cluster, n_threads, VS=True):
M = [1, 1000, 1000, 1000]
trace = {'alpha':[], 'num_cluster':[], 'beta1':np.zeros(shape=(mcmc_samples, len(beta_margin1))),
'beta2':np.zeros(shape=(mcmc_samples, len(beta_margin2))),
'suffstats':[], 'h2_1':[], 'h2_2':[]}
# initialize
state = gibbs.initial_state(data1=beta_margin1, data2=beta_margin2, idx1_shared=idx1_shared, idx2_shared=idx2_shared, ld_boundaries1=ld_boundaries1, ld_boundaries2=ld_boundaries2, M=M, N1=N1, N2=N2, a0k=.5, b0k=.5)
state['suffstats'] = gibbs.update_suffstats(state)
state['cluster_var'] = gibbs.sample_sigma2(state, rho=rho, VS=True)
state['a'] = 0.1; state['c'] = 1
state['A1'] = [np.linalg.solve(ref_ld_mat1[j]+state['a']*np.identity(ref_ld_mat1[j].shape[0]), ref_ld_mat1[j]) for j in range(len(ld_boundaries1))]
    state['B1'] = [np.dot(ref_ld_mat1[j], state['A1'][j]) for j in range(len(ld_boundaries1))]
    state['A2'] = [np.linalg.solve(ref_ld_mat2[j]+state['a']*np.identity(ref_ld_mat2[j].shape[0]), ref_ld_mat2[j]) for j in range(len(ld_boundaries2))]
state['B2'] = [np.dot(ref_ld_mat2[j], state['A2'][j]) for j in range(len(ld_boundaries2))]
for i in range(mcmc_samples):
# update everything
gibbs.gibbs_stick_break(state, rho=rho, idx1_shared=idx1_shared, idx2_shared=idx2_shared, ld_boundaries1=ld_boundaries1, ld_boundaries2=ld_boundaries2, ref_ld_mat1=ref_ld_mat1,
ref_ld_mat2=ref_ld_mat2, n_threads=n_threads, VS=VS)
if (i > burn):
trace['h2_1'].append(state['h2_1']*state['eta']**2)
trace['h2_2'].append(state['h2_2']*state['eta']**2)
#if (i % 100 == 0):
# print('h2_1: ' + str(state['h2_1']*state['eta']**2) + 'h2_2: ' + str(state['h2_2']*state['eta']**2) + ' max_beta1: ' + str(np.max(state['beta1']*state['eta'])) + ' max_beta2: ' + str(np.max(state['beta2']*state['eta'])))
# record the result
trace['beta1'][i,] = state['beta1']*state['eta']
trace['beta2'][i,] = state['beta2']*state['eta']
if (state['h2_1'] == 0 and state['h2_2'] == 0):
state = gibbs.initial_state(data1=beta_margin1, data2=beta_margin2, idx1_shared=idx1_shared, idx2_shared=idx2_shared, ld_boundaries1=ld_boundaries1, ld_boundaries2=ld_boundaries2, M=M, N1=N1, N2=N2, a0k=.5, b0k=.5)
state['suffstats'] = gibbs.update_suffstats(state)
state['a'] = 0.1; state['c'] = 1
state['A1'] = [np.linalg.solve(ref_ld_mat1[j]+state['a']*np.identity(ref_ld_mat1[j].shape[0]), ref_ld_mat1[j]) for j in range(len(ld_boundaries1))]
state['B1'] = [np.dot(ref_ld_mat1[j], state['A1'][j]) for j in range(len(ld_boundaries1))]
state['A2'] = [np.linalg.solve(ref_ld_mat2[j]+state['a']*np.identity(ref_ld_mat2[j].shape[0]), ref_ld_mat2[j]) for j in range(len(ld_boundaries2))]
state['B2'] = [np.dot(ref_ld_mat2[j], state['A2'][j]) for j in range(len(ld_boundaries2))]
state['eta'] = 1
util.progressBar(value=i+1, endvalue=mcmc_samples)
# calculate posterior average
poster_mean1 = np.mean(trace['beta1'][burn:mcmc_samples], axis=0)
poster_mean2 = np.mean(trace['beta2'][burn:mcmc_samples], axis=0)
print('h2_1: ' + str(np.median(trace['h2_1'])) + ' h2_2: ' + str(np.median(trace['h2_2'])) + ' max_beta1: ' + str(np.max(poster_mean1)) + ' max_beta2: ' + str(np.max(poster_mean2)))
print(state['pi_pop'])
return poster_mean1, poster_mean2
def pipeline(args):
# sanity check
N1 = args.N1; N2 = args.N2
print('Load summary statistics from {}'.format(args.ss1))
ss1 = pd.read_table(args.ss1)
print('Load summary statistics from {}'.format(args.ss2))
ss2 = pd.read_table(args.ss2)
valid = pd.read_table(args.valid, header=None)[1]
common = list((set(ss1.SNP) | set(ss2.SNP)) & set(valid))
ss1 = ss1[ss1.SNP.isin(common)]
ss2 = ss2[ss2.SNP.isin(common)]
ref_ld_mat1 = []; ref_ld_mat2 = []
ld_boundaries1 = []; ld_boundaries2 = []
A1_1 = []; A1_2 = []; SNP1 = []; SNP2 = []
beta_margin1 = []; beta_margin2 = []
idx1_shared = []; idx2_shared = []
left1 = 0; left2 = 0
f = gzip.open(args.load_ld + '/chr_' + str(args.chr) +'.gz', 'r')
try:
ld_dict = pickle.load(f)
except:
f.seek(0)
ld_dict = pickle.load(f, encoding='latin1')
f.close()
snps = ld_dict[0]; a1 = ld_dict[1]; a2 = ld_dict[2]
ref_boundary = ld_dict[3]; ref1 = ld_dict[4]; ref2 = ld_dict[5]
ref = | pd.DataFrame({'SNP':snps, 'A1':a1, 'A2':a2}) | pandas.DataFrame |
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class StringOperatorTests(TestCase):
def test_operator_decorator(self):
self.assertTrue(StringType("foo").equal_to.is_operator)
def test_string_equal_to(self):
self.assertTrue(StringType("foo").equal_to("foo"))
self.assertFalse(StringType("foo").equal_to("Foo"))
def test_string_not_equal_to(self):
self.assertTrue(StringType("foo").not_equal_to("Foo"))
self.assertTrue(StringType("foo").not_equal_to("boo"))
self.assertFalse(StringType("foo").not_equal_to("foo"))
def test_string_equal_to_case_insensitive(self):
self.assertTrue(StringType("foo").equal_to_case_insensitive("FOo"))
self.assertTrue(StringType("foo").equal_to_case_insensitive("foo"))
self.assertFalse(StringType("foo").equal_to_case_insensitive("blah"))
def test_string_starts_with(self):
self.assertTrue(StringType("hello").starts_with("he"))
self.assertFalse(StringType("hello").starts_with("hey"))
self.assertFalse(StringType("hello").starts_with("He"))
def test_string_ends_with(self):
self.assertTrue(StringType("hello").ends_with("lo"))
self.assertFalse(StringType("hello").ends_with("boom"))
self.assertFalse(StringType("hello").ends_with("Lo"))
def test_string_contains(self):
self.assertTrue(StringType("hello").contains("ell"))
self.assertTrue(StringType("hello").contains("he"))
self.assertTrue(StringType("hello").contains("lo"))
self.assertFalse(StringType("hello").contains("asdf"))
self.assertFalse(StringType("hello").contains("ElL"))
def test_string_matches_regex(self):
self.assertTrue(StringType("hello").matches_regex(r"^h"))
self.assertFalse(StringType("hello").matches_regex(r"^sh"))
def test_non_empty(self):
self.assertTrue(StringType("hello").non_empty())
self.assertFalse(StringType("").non_empty())
self.assertFalse(StringType(None).non_empty())
class NumericOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, err_string):
NumericType("foo")
def test_numeric_type_validates_and_casts_decimal(self):
ten_dec = Decimal(10)
ten_int = 10
ten_float = 10.0
if sys.version_info[0] == 2:
ten_long = long(10)
else:
ten_long = int(10) # long and int are same in python3
ten_var_dec = NumericType(ten_dec) # this should not throw an exception
ten_var_int = NumericType(ten_int)
ten_var_float = NumericType(ten_float)
ten_var_long = NumericType(ten_long)
self.assertTrue(isinstance(ten_var_dec.value, Decimal))
self.assertTrue(isinstance(ten_var_int.value, Decimal))
self.assertTrue(isinstance(ten_var_float.value, Decimal))
self.assertTrue(isinstance(ten_var_long.value, Decimal))
def test_numeric_equal_to(self):
self.assertTrue(NumericType(10).equal_to(10))
self.assertTrue(NumericType(10).equal_to(10.0))
self.assertTrue(NumericType(10).equal_to(10.000001))
self.assertTrue(NumericType(10.000001).equal_to(10))
self.assertTrue(NumericType(Decimal('10.0')).equal_to(10))
self.assertTrue(NumericType(10).equal_to(Decimal('10.0')))
self.assertFalse(NumericType(10).equal_to(10.00001))
self.assertFalse(NumericType(10).equal_to(11))
def test_numeric_not_equal_to(self):
self.assertTrue(NumericType(10).not_equal_to(10.00001))
self.assertTrue(NumericType(10).not_equal_to(11))
self.assertTrue(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.1')))
self.assertFalse(NumericType(10).not_equal_to(10))
self.assertFalse(NumericType(10).not_equal_to(10.0))
self.assertFalse(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.0')))
def test_other_value_not_numeric(self):
error_string = "10 is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, error_string):
NumericType(10).equal_to("10")
def test_numeric_greater_than(self):
self.assertTrue(NumericType(10).greater_than(1))
self.assertFalse(NumericType(10).greater_than(11))
self.assertTrue(NumericType(10.1).greater_than(10))
self.assertFalse(NumericType(10.000001).greater_than(10))
self.assertTrue(NumericType(10.000002).greater_than(10))
def test_numeric_greater_than_or_equal_to(self):
self.assertTrue(NumericType(10).greater_than_or_equal_to(1))
self.assertFalse(NumericType(10).greater_than_or_equal_to(11))
self.assertTrue(NumericType(10.1).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000001).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000002).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10).greater_than_or_equal_to(10))
def test_numeric_less_than(self):
self.assertTrue(NumericType(1).less_than(10))
self.assertFalse(NumericType(11).less_than(10))
self.assertTrue(NumericType(10).less_than(10.1))
self.assertFalse(NumericType(10).less_than(10.000001))
self.assertTrue(NumericType(10).less_than(10.000002))
def test_numeric_less_than_or_equal_to(self):
self.assertTrue(NumericType(1).less_than_or_equal_to(10))
self.assertFalse(NumericType(11).less_than_or_equal_to(10))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.1))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000001))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000002))
self.assertTrue(NumericType(10).less_than_or_equal_to(10))
class BooleanOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType("foo")
err_string = "None is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType(None)
def test_boolean_is_true_and_is_false(self):
self.assertTrue(BooleanType(True).is_true())
self.assertFalse(BooleanType(True).is_false())
self.assertFalse(BooleanType(False).is_true())
self.assertTrue(BooleanType(False).is_false())
class SelectOperatorTests(TestCase):
def test_contains(self):
self.assertTrue(SelectType([1, 2]).contains(2))
self.assertFalse(SelectType([1, 2]).contains(3))
self.assertTrue(SelectType([1, 2, "a"]).contains("A"))
def test_does_not_contain(self):
self.assertTrue(SelectType([1, 2]).does_not_contain(3))
self.assertFalse(SelectType([1, 2]).does_not_contain(2))
self.assertFalse(SelectType([1, 2, "a"]).does_not_contain("A"))
class SelectMultipleOperatorTests(TestCase):
def test_contains_all(self):
self.assertTrue(SelectMultipleType([1, 2]).
contains_all([2, 1]))
self.assertFalse(SelectMultipleType([1, 2]).
contains_all([2, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
contains_all([2, 1, "A"]))
def test_is_contained_by(self):
self.assertTrue(SelectMultipleType([1, 2]).
is_contained_by([2, 1, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
is_contained_by([2, 3, 4]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
is_contained_by([2, 1, "A"]))
def test_shares_at_least_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_at_least_one_element_with([4, "A"]))
def test_shares_exactly_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_exactly_one_element_with([4, "A"]))
self.assertFalse(SelectMultipleType([1, 2, 3]).
shares_exactly_one_element_with([2, 3, "a"]))
def test_shares_no_elements_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_no_elements_with([4, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_no_elements_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2, "a"]).
shares_no_elements_with([4, "A"]))
class DataframeOperatorTests(TestCase):
def test_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ],
})
result: pd.Series = DataframeType({"value": df}).exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_not_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ]
})
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, "", 7, ],
"var2": [3, 5, 6, "", 2, ],
"var3": [1, 3, 8, "", 7, ],
"var4": ["test", "issue", "one", "", "two", ]
})
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to({
"target": "--r1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 20
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var4",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False, False, ])))
def test_not_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": 20
}).equals(pandas.Series([True, True, True])))
def test_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "", "new", "val"],
"var2": ["WORD", "", "test", "VAL"],
"var3": ["LET", "", "GO", "read"]
})
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "NEW"
}).equals(pandas.Series([False, False, True, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False])))
def test_not_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "new", "val"],
"var2": ["WORD", "test", "VAL"],
"var3": ["LET", "GO", "read"],
"var4": ["WORD", "NEW", "VAL"]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
def test_less_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than({
"target": "--r1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": 3
}).equals(pandas.Series([True, True, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than_or_equal_to({
"target": "--r1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([False, False, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 5, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than_or_equal_to({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_greater_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": 5000
}).equals(pandas.Series([False, False, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than_or_equal_to({
"target": "var1",
"comparator": "--r4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([True, True, False])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 3, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than_or_equal_to({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_contains(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": 5
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).does_not_contain({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([False, True, True])))
def test_contains_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["POKEMON", "CHARIZARD", "BULBASAUR"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "PIKACHU"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["pikachu", "charizard", "bulbasaur"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var1",
"comparator": "IVYSAUR"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([False, True, True])))
def test_is_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [4,5,6]
}).equals(pandas.Series([False, False, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([True, True, False])))
def test_is_not_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([False, False, True])))
def test_is_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
def test_is_not_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
def test_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).prefix_matches_regex({
"target": "--r2",
"comparator": "w.*",
"prefix": 2
}).equals( | pandas.Series([True, False]) | pandas.Series |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.024011, "end_time": "2021-02-02T22:30:31.951734", "exception": false, "start_time": "2021-02-02T22:30:31.927723", "status": "completed"} tags=[]
# # QA queries on new CDR_deid dateshift
#
# Quality checks performed on a new CDR dataset using QA queries
# + papermill={"duration": 0.709639, "end_time": "2021-02-02T22:30:32.661373", "exception": false, "start_time": "2021-02-02T22:30:31.951734", "status": "completed"} tags=[]
import urllib
import pandas as pd
pd.options.display.max_rows = 120
# + tags=["parameters"]
project_id = ""
com_cdr = ""
deid_cdr = ""
pipeline=""
# -
# df will hold a summary of all query results at the end
df = pd.DataFrame(columns = ['query', 'result'])
# + [markdown] papermill={"duration": 0.02327, "end_time": "2021-02-02T22:30:32.708257", "exception": false, "start_time": "2021-02-02T22:30:32.684987", "status": "completed"} tags=[]
# # 1 DS_1 Verify that the fields identified to follow the date shift rule as a de-identification action in the OBSERVATION table have been randomly date shifted.
# + papermill={"duration": 4.105203, "end_time": "2021-02-02T22:30:36.813460", "exception": false, "start_time": "2021-02-02T22:30:32.708257", "status": "completed"} tags=[]
query = f'''
WITH df1 AS (
SELECT
DATE_DIFF(DATE(i.observation_date), DATE(d.observation_date),day)-m.shift as diff
FROM `{project_id}.{pipeline}.pid_rid_mapping` m
JOIN `{project_id}.{com_cdr}.observation` i
ON m.person_id = i.person_id
JOIN `{project_id}.{deid_cdr}.observation` d
ON d.observation_id = i.observation_id)
SELECT COUNT(*) AS n_row_not_pass FROM df1
WHERE diff !=0
'''
df1=pd.read_gbq(query, dialect='standard')
if df1.eq(0).any().any():
df = df.append({'query' : 'Query1 OBSERVATION', 'result' : 'PASS'},
ignore_index = True)
else:
df = df.append({'query' : 'Query1 OBSERVATION', 'result' : ''},
ignore_index = True)
df1
# -
# # 3 DS_3 Verify that the fields identified to follow the date shift rule as a de-identification action in the OBSERVATION_PERIOD table have been randomly date shifted.
# + papermill={"duration": 2.136748, "end_time": "2021-02-02T22:30:39.044867", "exception": false, "start_time": "2021-02-02T22:30:36.908119", "status": "completed"} tags=[]
query = f'''
WITH df1 AS (
SELECT
DATE_DIFF(DATE(i.observation_period_start_date), DATE(d.observation_period_start_date),day)-m.shift as diff
FROM `{project_id}.{pipeline}.pid_rid_mapping` m
JOIN `{project_id}.{com_cdr}.observation_period` i
ON m.person_id = i.person_id
JOIN `{project_id}.{deid_cdr}.observation_period` d
ON d.observation_period_id = i.observation_period_id)
SELECT COUNT(*) AS n_row_not_pass FROM df1
WHERE diff !=0
'''
df1= | pd.read_gbq(query, dialect='standard') | pandas.read_gbq |
import deimos
import numpy as np
from pandas.core.series import Series
import pytest
from tests import localfile
@pytest.fixture()
def ms1():
return deimos.load_hdf(localfile('resources/example_data.h5'),
key='ms1')
@pytest.mark.parametrize('x,expected',
[('a', ['a']),
(['a', 'b', 'c'], ['a', 'b', 'c']),
(1, [1]),
([1, 2, 3], [1, 2, 3])])
def test_safelist(x, expected):
# list
assert deimos.utils.safelist(x) == expected
# array
assert np.all(deimos.utils.safelist(np.array(x)) == np.array(expected))
# series
assert (deimos.utils.safelist( | Series(x) | pandas.core.series.Series |
import pandas as pd
import numpy as np
from rdtools import energy_from_power
import pytest
# Tests for resampling at same frequency
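# A quick sanity check of the numbers used below: a constant 4 W signal sampled every 15 minutes
# integrates to 4 W * 0.25 h = 1.0 Wh per interval, which is the expected_energy_series in the first test.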
def test_energy_from_power_calculation():
power_times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:00', freq='15T')
result_times = power_times[1:]
power_series = pd.Series(data=4.0, index=power_times)
expected_energy_series = pd.Series(data=1.0, index=result_times)
expected_energy_series.name = 'energy_Wh'
result = energy_from_power(power_series, max_timedelta=pd.to_timedelta('15 minutes'))
pd.testing.assert_series_equal(result, expected_energy_series)
def test_energy_from_power_max_interval():
power_times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:00', freq='15T')
result_times = power_times[1:]
power_series = pd.Series(data=4.0, index=power_times)
expected_energy_series = pd.Series(data=np.nan, index=result_times)
expected_energy_series.name = 'energy_Wh'
result = energy_from_power(power_series, max_timedelta=pd.to_timedelta('5 minutes'))
# We expect series of NaNs, because max_interval_hours is smaller than the
# time step of the power time series
pd.testing.assert_series_equal(result, expected_energy_series)
def test_energy_from_power_validation():
power_series = pd.Series(data=[4.0] * 4)
with pytest.raises(ValueError):
energy_from_power(power_series, max_timedelta=pd.to_timedelta('15 minutes'))
def test_energy_from_power_single_argument():
power_times = pd.date_range('2018-04-01 12:00', '2018-04-01 15:00', freq='15T')
result_times = power_times[1:]
power_series = pd.Series(data=4.0, index=power_times)
missing = pd.to_datetime('2018-04-01 13:00:00')
power_series = power_series.drop(missing)
expected_energy_series = pd.Series(data=1.0, index=result_times)
expected_nan = [missing]
expected_nan.append(pd.to_datetime('2018-04-01 13:15:00'))
expected_energy_series.loc[expected_nan] = np.nan
expected_energy_series.name = 'energy_Wh'
# Test that the result has the expected missing timestamp based on median timestep
result = energy_from_power(power_series)
pd.testing.assert_series_equal(result, expected_energy_series)
# Tests for downsampling
def test_energy_from_power_downsample():
times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:00', freq='15T')
time_series = pd.Series(data=[1.0, 2.0, 3.0, 4.0, 5.0], index=times)
expected_energy_series = pd.Series(index=[pd.to_datetime('2018-04-01 13:00:00')],
data=3.0, name='energy_Wh')
expected_energy_series.index.freq = '60T'
result = energy_from_power(time_series, '60T')
pd.testing.assert_series_equal(result, expected_energy_series)
def test_energy_from_power_downsample_max_timedelta_exceeded():
times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:00', freq='15T')
time_series = pd.Series(data=[1.0, 2.0, 3.0, 4.0, 5.0], index=times)
expected_energy_series = pd.Series(index=[pd.to_datetime('2018-04-01 13:00:00')],
data=1.5, name='energy_Wh')
expected_energy_series.index.freq = '60T'
result = energy_from_power(time_series.drop(time_series.index[2]), '60T', pd.to_timedelta('15 minutes'))
pd.testing.assert_series_equal(result, expected_energy_series)
def test_energy_from_power_downsample_max_timedelta_not_exceeded():
times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:00', freq='15T')
time_series = pd.Series(data=[1.0, 2.0, 3.0, 4.0, 5.0], index=times)
expected_energy_series = pd.Series(index=[pd.to_datetime('2018-04-01 13:00:00')],
data=3.0, name='energy_Wh')
expected_energy_series.index.freq = '60T'
result = energy_from_power(time_series.drop(time_series.index[2]), '60T', pd.to_timedelta('60 minutes'))
pd.testing.assert_series_equal(result, expected_energy_series)
def test_energy_from_power_for_issue_107():
times = pd.date_range('2018-04-01 12:00', '2018-04-01 16:00', freq='15T')
dc_power = pd.Series(index=times, data=1.0)
dc_power = dc_power.drop(dc_power.index[5:12])
expected_times = pd.date_range('2018-04-01 13:00', '2018-04-01 16:00', freq='60T')
expected_energy_series = pd.Series(index=expected_times,
data=[1.0, np.nan, np.nan, 1.0],
name='energy_Wh')
result = energy_from_power(dc_power, '60T')
pd.testing.assert_series_equal(result, expected_energy_series)
# Tests for upsampling
def test_energy_from_power_upsample():
times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:30', freq='30T')
time_series = pd.Series(data=[1.0, 3.0, 5.0, 6.0], index=times)
expected_result_times = pd.date_range('2018-04-01 12:15', '2018-04-01 13:30', freq='15T')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 14 15:52:31 2018
@author: sabih
"""
import os
import sys
import pandas as pd
import numpy as np
from functools import lru_cache
import copy
from typing import List
import logging
from exceptions import param_check_correct_list
import datetime
logger_this = logging.getLogger(__name__)
logger_this.setLevel(logging.DEBUG)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
@lru_cache()
class DataImport():
def __init__(self, path=None):
if path is None:
self._path = os.getcwd()
else:
self._path = path
def read_data_csv(self):
self._df_deliveries_idx = pd.read_csv('deliveries.csv')
self._df_customers = pd.read_csv('customer_details.csv')
self._df_colocations = pd.read_csv('colocations.csv')
self._df_levels_idx = pd.read_csv('level_readings.csv')
self._df_levels = self._df_levels_idx.set_index(['ON_DATE_TIME'])
self._df_deliveries = self._df_deliveries_idx.set_index(['DELIVERY_DATE'])
self._df_levels.index = pd.to_datetime(self._df_levels.index)
self._df_deliveries.index = pd.to_datetime(self._df_deliveries.index)
def write_sorted(self):
vessel_ids = self._df_customers['VESSEL_ID']
self._grp_lv_vid = self._df_levels_idx.groupby('VESSEL_ID').groups
npr = np.empty(len(self._df_levels), dtype=object)
for idx, vessel_id in enumerate(vessel_ids):
s_mask = self._grp_lv_vid[vessel_id]
index = self._df_levels.iloc[s_mask].index
int_index = self._df_levels_idx.iloc[s_mask].index
print(index.is_monotonic)
if index.is_monotonic == False:
new_index = list(sorted(index))
npr[int_index] = new_index
print('new index is sorted')
else:
new_index =list(index)
npr[int_index] = new_index
self._df_levels_idx['SORTED_DATE_TIME']=pd.to_datetime(npr)
self._df_levels['SORTED_DATE_TIME']=pd.to_datetime(npr)
# self._df_levels.reindex(self._df_levels['SORTED_DATE_TIME'])
self._df_levels.reindex(self._df_levels['SORTED_DATE_TIME'])
self._df_levels.to_hdf('sorted_levels_idx_fin.h5', 'dataset')
# self._df_levels.to_hdf('sorted_levels.h5', 'dataset')
def check_sorted(self):
vessel_ids = self._df_customers['VESSEL_ID']
self._grp_lv_vid = self._df_levels_idx.groupby('VESSEL_ID').groups
self._grp_dl_vid = self._df_deliveries_idx.groupby('VESSEL_ID').groups
for idx, vessel_id in enumerate(vessel_ids):
s_mask = self._grp_lv_vid[vessel_id]
d_mask = self._grp_dl_vid[vessel_id]
index_lv = self._df_levels.iloc[s_mask]['SORTED_DATE_TIME']
index_dl = self._df_deliveries.iloc[d_mask].index
if index_lv.is_monotonic == False:
print("LEVELS ARE NOT MONOTONIC")
if index_dl.is_monotonic == False:
print("DELIVERIES ARE NOT MONOTONIC")
def read_data_hdf(self):
self._df_deliveries = pd.read_hdf('deliveries.h5', 'dataset')
self._df_customers = pd.read_hdf('customers.h5', 'dataset')
self._df_levels = pd.read_hdf('sorted_levels.h5', 'dataset')
self._df_colocations = pd.read_hdf('colocations.h5', 'dataset')
# define custom R2 metrics for Keras backend
from keras import backend as K
def r2_keras(y_true, y_pred):
SS_res = K.sum(K.square( y_true - y_pred ))
SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
return ( 1 - SS_res/(SS_tot + K.epsilon()) )
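# R^2 = 1 - SS_res/SS_tot, written with Keras backend ops so it can be evaluated on
# tensors during training; K.epsilon() guards against division by zero when y_true is constant.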
# base model architecture definition
def model():
model = Sequential()
#input layer
model.add(Dense(input_dims, input_dim=input_dims))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3))
# hidden layers
model.add(Dense(input_dims))
model.add(BatchNormalization())
model.add(Activation(act_func))
model.add(Dropout(0.3))
model.add(Dense(input_dims//2))
model.add(BatchNormalization())
model.add(Activation(act_func))
model.add(Dropout(0.3))
model.add(Dense(input_dims//4, activation=act_func))
# output layer (y_pred)
model.add(Dense(1, activation='linear'))
# compile this model
model.compile(loss='mean_squared_error', # one may use 'mean_absolute_error' as alternative
optimizer='adam',
metrics=[r2_keras] # you can add several if needed
)
# Visualize NN architecture
print(model.summary())
return model
################K2
import pandas as pd
import numpy as np
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import RobustScaler
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, InputLayer, GaussianNoise
from keras.wrappers.scikit_learn import KerasRegressor
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
#
# Data preparation
#
y_train = train['y'].values
id_test = test['ID']
num_train = len(train)
df_all = pd.concat([train, test])
df_all.drop(['ID', 'y'], axis=1, inplace=True)
# One-hot encoding of categorical/strings
df_all = pd.get_dummies(df_all, drop_first=True)
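# drop_first=True drops one indicator level per categorical column, which avoids
# perfectly collinear dummy columns in the one-hot encoding.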
# Scaling features
scaler = RobustScaler()
df_all = scaler.fit_transform(df_all)
train = df_all[:num_train]
test = df_all[num_train:]
# Keep only the most contributing features
sfm = SelectFromModel(LassoCV())
sfm.fit(train, y_train)
train = sfm.transform(train)
test = sfm.transform(test)
print ('Number of features : %d' % train.shape[1])
def r2_keras(y_true, y_pred):
SS_res = K.sum(K.square( y_true - y_pred ))
SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
return ( 1 - SS_res/(SS_tot + K.epsilon()) )
def build_model_fn(neurons=20, noise=0.25):
model = Sequential()
model.add(InputLayer(input_shape=(train.shape[1],)))
model.add(GaussianNoise(noise))
model.add(Dense(neurons, activation='tanh'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mean_squared_error', optimizer='nadam', metrics=[r2_keras])
return model
#
# Tuning model parameters
#
model = KerasRegressor(build_fn=build_model_fn, epochs=75, verbose=0)
gsc = GridSearchCV(
estimator=model,
param_grid={
#'neurons': range(18,31,4),
'noise': [x/20.0 for x in range(3, 7)],
},
#scoring='r2',
scoring='neg_mean_squared_error',
cv=5
)
grid_result = gsc.fit(train, y_train)
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
for test_mean, test_stdev, train_mean, train_stdev, param in zip(
grid_result.cv_results_['mean_test_score'],
grid_result.cv_results_['std_test_score'],
grid_result.cv_results_['mean_train_score'],
grid_result.cv_results_['std_train_score'],
grid_result.cv_results_['params']):
print("Train: %f (%f) // Test : %f (%f) with: %r" % (train_mean, train_stdev, test_mean, test_stdev, param))
#
# Train model with best params for submission
#
model = build_model_fn(**grid_result.best_params_)
model.fit(train, y_train, epochs=75, verbose=2)
y_test = model.predict(test).flatten()
df_sub = pd.DataFrame({'ID': id_test, 'y': y_test})
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
from httplib2 import Http
import os
import logging
from pandas_bigquery.exceptions import *
from pandas_bigquery.datasets import Datasets
from pandas_bigquery.tables import Tables
from pandas_bigquery.tabledata import Tabledata
from pandas_bigquery.jobs import Jobs
from pandas import DataFrame, concat
from pandas.compat import lzip
from datetime import datetime
from random import randint
import numpy as np
from time import sleep
try:
from googleapiclient.errors import HttpError
except:
from apiclient.errors import HttpError
log = logging.getLogger()
class Bigquery:
def __init__(self, project_id=os.getenv('BIGQUERY_PROJECT'), private_key_path=os.getenv('BIGQUERY_KEY_PATH')):
if private_key_path is None:
raise RuntimeError('Invalid bigquery key path')
self.project_id = project_id
self.private_key_path = private_key_path
credentials = ServiceAccountCredentials.from_json_keyfile_name(
private_key_path, ['https://www.googleapis.com/auth/bigquery'])
http_auth = credentials.authorize(Http())
self._service = build('bigquery', 'v2', http=http_auth)
with open(private_key_path) as data_file:
self.private_key = data_file.read()
self._tables = Tables(self.project_id, private_key=self.private_key_path)
self._datasets = Datasets(self.project_id, private_key=self.private_key_path)
self._jobs = Jobs(self.project_id, private_key=self.private_key_path)
self._tabledata = Tabledata(self.project_id, private_key=self.private_key_path)
@staticmethod
def _parse_data(schema, rows):
# see:
# http://pandas.pydata.org/pandas-docs/dev/missing_data.html
# #missing-data-casting-rules-and-indexing
dtype_map = {'FLOAT': np.dtype(float),
'TIMESTAMP': 'M8[ns]'}
fields = schema['fields']
col_types = [field['type'] for field in fields]
col_names = [str(field['name']) for field in fields]
col_dtypes = [dtype_map.get(field['type'], object) for field in fields]
page_array = np.zeros((len(rows),), dtype=lzip(col_names, col_dtypes))
import logging
import click
from pathlib import Path
import pandas as pd
import numpy as np
import geopy.distance
import datetime
import pytz
from src.filename import BOOKING_PREPROCESSED, PARTICIPANT_PREPROCESSED, TEST_PREPROCESSED, TRAIN_TRANSFORMED, TRAIN, TEST
def cal_dist(row):
lat_x, long_x, lat_y, long_y = row['pickup_latitude'], row['pickup_longitude'], row['driver_latitude'], row['driver_longitude']
coord_x = (lat_x, long_x)
coord_y = (lat_y, long_y)
return geopy.distance.distance(coord_x, coord_y).km
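# Illustrative note: identical pickup and driver coordinates give 0.0 km; recent geopy
# versions compute geopy.distance.distance as the geodesic (WGS-84) distance, returned here in km.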
def is_peak(row):
local_tz = pytz.timezone('Asia/Jakarta')
utc_ts = row['event_timestamp']
try:
utc_ts = datetime.datetime.strptime(utc_ts, '%Y-%m-%d %H:%M:%S+00:00')
except:
utc_ts = datetime.datetime.strptime(utc_ts, '%Y-%m-%d %H:%M:%S.%f000+00:00')
local_dt = utc_ts.replace(tzinfo=pytz.utc).astimezone(local_tz)
day = local_dt.isoweekday()
time = local_dt.time()
if 1 <= day <= 4:
if ((time >= datetime.time(7, 0)) and (time <= datetime.time(10, 0))) | ((time >= datetime.time(17, 0)) and (time <= datetime.time(20, 0))):
return 1
else:
return 0
else:
if ((time >= datetime.time(8, 0)) and (time <= datetime.time(10, 0))) | ((time >= datetime.time(17, 0)) and (time <= datetime.time(23, 59))):
return 1
else:
return 0
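# Worked example (illustrative): 2019-01-07 01:30:00+00:00 is 08:30 on a Monday in
# Asia/Jakarta (UTC+7), which falls inside the 07:00-10:00 weekday window, so
# is_peak returns 1.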
def train_transform(input_filepath, output_filepath):
booking = pd.read_csv(input_filepath + '/%s' % BOOKING_PREPROCESSED)
driver = pd.read_csv(input_filepath + '/%s' % PARTICIPANT_PREPROCESSED)
# create base dataset, each row represents one booking allocation
# output indicates whether the allocation trip is completed
# having the same columns as test, except output (to be predicted for test data)
created_booking_cols = ['event_timestamp', 'order_id','trip_distance', 'pickup_latitude', 'pickup_longitude']
created = booking[booking.booking_status =='CREATED'][created_booking_cols]
completed_booking_cols = ['order_id', 'driver_id']
completed = booking[booking.booking_status == 'COMPLETED'][completed_booking_cols]
booking_base = pd.merge(created, completed, on=['order_id'], how='left').rename(columns={'driver_id':'booking_driver_id'})
driver_cols = ['order_id', 'driver_id', 'driver_latitude', 'driver_longitude', 'driver_gps_accuracy']
driver_base = driver[driver_cols]
train_transformed = pd.merge(driver_base, booking_base, on=['order_id'], how='left')
from pandas import DataFrame, Series
import src.tcx as tcx
COLUMN_NAME_GEAR_RATIO = "gear-ratio"
class TrainDataSet:
def __init__(self, t: tcx.Tcx):
self.df = t.to_dataframe().copy()
self.df.dropna(axis=0, subset=[tcx.COLUMN_NAME_CADENCE, tcx.COLUMN_NAME_SPEED], inplace=True) # drop rows with nan in cad & speed
self.df[COLUMN_NAME_GEAR_RATIO] = self.df[tcx.COLUMN_NAME_SPEED] / self.df[tcx.COLUMN_NAME_CADENCE]
def get_dataframe(self) -> DataFrame:
return self.df.copy()
def cadence_to_power(self) -> tuple:
return (DataFrame(data={tcx.COLUMN_NAME_CADENCE: self.df[tcx.COLUMN_NAME_CADENCE]}), self._power)
def cadence_to_speed(self) -> tuple:
return (DataFrame(data={tcx.COLUMN_NAME_CADENCE: self.df[tcx.COLUMN_NAME_CADENCE]}), self.df[tcx.COLUMN_NAME_SPEED])
def speed_to_cadence(self) -> tuple:
return (DataFrame(data={tcx.COLUMN_NAME_SPEED: self.df[tcx.COLUMN_NAME_SPEED]}), self.df[tcx.COLUMN_NAME_CADENCE])
'''
Project: WGU Data Management/Analytics Undergraduate Capstone
<NAME>
August 2021
GDELTbase.py
Class for creating/maintaining data directory structure, bulk downloading of
GDELT files with column reduction, parsing/cleaning to JSON format, and export
of cleaned records to MongoDB.
Basic use should be by import and implementation within an IDE, or by editing
section # C00 and running this script directly.
Primary member functions include descriptive docstrings for their intent and
use.
See license.txt for information related to each open-source library used.
WARNING: project file operations are based on relative pathing from the
'scripts' directory this Python script is located in, given the creation of
directories 'GDELTdata' and 'EDAlogs' parallel to 'scripts' upon first
GDELTbase and GDELTeda class initializations.
If those directories are not already present, a fallback method for
string-literal directory reorientation may be found in GDELTbase shared class
data at this tag: # A01a - backup path specification.
Any given user's project directory must be specified there.
See also GDELTeda.py, tag # A02b - Project directory path, as any given user's
project directory must be specified for that os.chdir() call, also.
Contents:
A00 - GDELTbase
A01 - shared class data (toolData, localDb)
A01a - backup path specification
Note: Specification at A01a should be changed to suit a user's desired
directory structure, given their local filesystem.
A02 - __init__ w/ instanced data (localFiles)
B00 - class methods
B01 - updateLocalFilesIndex
B02 - clearLocalFilesIndex
B03 - showLocalFiles
B04 - wipeLocalFiles
B05 - extensionToTableName
B06 - isFileDownloaded
B07 - downloadGDELTFile
B08 - downloadGDELTDay
B09 - cleanFile (includes the following field/subfield parser functions)
B09a - themeSplitter
B09b - locationsSplitter
B09c - personsSplitter
B09d - organizationsSplitter
B09e - toneSplitter
B09f - countSplitter
B09g - One-liner date conversion function for post-read_csv use
B09h - llConverter
B10 - cleanTable
B11 - mongoFile
B12 - mongoTable
C00 - main w/ testing
'''
import pandas as pd
import numpy as np
import os
import pymongo
import wget
import json
from time import time
from datetime import datetime, tzinfo
from zipfile import ZipFile as zf
from pprint import pprint as pp
from urllib.error import HTTPError
# A00
class GDELTbase:
'''Base object for GDELT data acquisition, cleaning, and storage.
Shared class data:
-----------------
toolData - dict with these key - value pairs:
URLbase - "http://data.gdeltproject.org/gdeltv2/"
path - os.path path objects, 'raw' and 'clean', per-table
names - lists of string column names, per-table, original and reduced
extensions - dict mapping table names to file extensions, per-table
columnTypes - dicts mapping table column names to appropriate types
localDb - dict with these key - value pairs:
client - pymongo.MongoClient()
database - pymongo.MongoClient().capstone
collections - dict mapping table names to suitable mongoDB collections
Instanced class data:
--------------------
localFiles - dict, per-table keys for lists of local 'raw' and 'clean'
filenames
Class methods:
-------------
updateLocalFilesIndex()
clearLocalFilesIndex()
showLocalFiles()
wipeLocalFiles()
extensionToTableName()
isFileDownloaded()
downloadGDELTFile()
downloadGDELTDay()
cleanFile()
cleanTable()
mongoFile()
mongoTable()
'''
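# Usage sketch (illustrative only; argument lists are assumptions -- see each
# method's docstring below for the actual parameters):
#   gdb = GDELTbase()
#   gdb.updateLocalFilesIndex()
#   gdb.showLocalFiles()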
# A01 - shared class data
toolData = {}
# A01a - backup path specification
# Failsafe path for local main project directory. Must be changed to suit
# location of any given end-user's 'script' directory in case directory
# 'GDELTdata' is not present one directory up.
toolData['projectPath'] = 'C:\\Users\\urf\\Projects\\WGU capstone'
# Controls generation of datafile download URLs in downloadGDELTDay()/File()
toolData['URLbase'] = "http://data.gdeltproject.org/gdeltv2/"
# Used in forming URLs for datafile download
toolData['extensions'] = {
'events' : "export.CSV.zip",
'gkg' : "gkg.csv.zip",
'mentions' : "mentions.CSV.zip",
}
# These paths are set relative to the location of this script, one directory
# up, in 'GDELTdata', parallel to the script directory.
toolData['path'] = {}
toolData['path']['base']= os.path.join(os.path.abspath(__file__),
os.path.realpath('..'),
'GDELTdata')
toolData['path']['events'] = {
'table': os.path.join(toolData['path']['base'], 'events'),
'raw': os.path.join(toolData['path']['base'], 'events', 'raw'),
'clean': os.path.join(toolData['path']['base'], 'events', 'clean'),
'realtimeR' : os.path.join(toolData['path']['base'], 'events',
'realtimeRaw'),
'realtimeC' : os.path.join(toolData['path']['base'], 'events',
'realtimeClean')
}
toolData['path']['gkg'] = {
'table': os.path.join(toolData['path']['base'], 'gkg'),
'raw': os.path.join(toolData['path']['base'], 'gkg', 'raw'),
'clean': os.path.join(toolData['path']['base'], 'gkg', 'clean'),
'realtimeR' : os.path.join(toolData['path']['base'], 'gkg',
'realtimeRaw'),
'realtimeC' : os.path.join(toolData['path']['base'], 'gkg',
'realtimeClean')
}
toolData['path']['mentions'] = {
'table': os.path.join(toolData['path']['base'], 'mentions'),
'raw': os.path.join(toolData['path']['base'], 'mentions', 'raw'),
'clean': os.path.join(toolData['path']['base'], 'mentions', 'clean'),
'realtimeR' : os.path.join(toolData['path']['base'], 'mentions',
'realtimeRaw'),
'realtimeC' : os.path.join(toolData['path']['base'], 'mentions',
'realtimeClean')
}
# These mappings and lists are for recognition of all possible
# column names, and the specific discarding of a number of columns
# which have been predetermined as unnecessary in the context of
# simple EDA.
toolData['names'] = {}
toolData['names']['events'] = {
'original' : [
'GLOBALEVENTID',
'Day',
'MonthYear',
'Year',
'FractionDate',
'Actor1Code',
'Actor1Name',
'Actor1CountryCode',
'Actor1KnownGroupCode',
'Actor1EthnicCode',
'Actor1Religion1Code',
'Actor1Religion2Code',
'Actor1Type1Code',
'Actor1Type2Code',
'Actor1Type3Code',
'Actor2Code',
'Actor2Name',
'Actor2CountryCode',
'Actor2KnownGroupCode',
'Actor2EthnicCode',
'Actor2Religion1Code',
'Actor2Religion2Code',
'Actor2Type1Code',
'Actor2Type2Code',
'Actor2Type3Code',
'IsRootEvent',
'EventCode',
'EventBaseCode',
'EventRootCode',
'QuadClass',
'GoldsteinScale',
'NumMentions',
'NumSources',
'NumArticles',
'AvgTone',
'Actor1Geo_Type',
'Actor1Geo_FullName',
'Actor1Geo_CountryCode',
'Actor1Geo_ADM1Code',
'Actor1Geo_ADM2Code',
'Actor1Geo_Lat',
'Actor1Geo_Long',
'Actor1Geo_FeatureID',
'Actor2Geo_Type',
'Actor2Geo_FullName',
'Actor2Geo_CountryCode',
'Actor2Geo_ADM1Code',
'Actor2Geo_ADM2Code',
'Actor2Geo_Lat',
'Actor2Geo_Long',
'Actor2Geo_FeatureID',
'ActionGeo_Type',
'ActionGeo_FullName',
'ActionGeo_CountryCode',
'ActionGeo_ADM1Code',
'ActionGeo_ADM2Code',
'ActionGeo_Lat',
'ActionGeo_Long',
'ActionGeo_FeatureID',
'DATEADDED',
'SOURCEURL',
],
'reduced' : [
'GLOBALEVENTID',
'Actor1Code',
'Actor1Name',
'Actor1CountryCode',
'Actor1Type1Code',
'Actor1Type2Code',
'Actor1Type3Code',
'Actor2Code',
'Actor2Name',
'Actor2CountryCode',
'Actor2Type1Code',
'Actor2Type2Code',
'Actor2Type3Code',
'IsRootEvent',
'EventCode',
'EventBaseCode',
'EventRootCode',
'QuadClass',
'AvgTone',
'Actor1Geo_Type',
'Actor1Geo_FullName',
'Actor1Geo_Lat',
'Actor1Geo_Long',
'Actor2Geo_Type',
'Actor2Geo_FullName',
'Actor2Geo_Lat',
'Actor2Geo_Long',
'ActionGeo_Type',
'ActionGeo_FullName',
'ActionGeo_Lat',
'ActionGeo_Long',
'DATEADDED',
'SOURCEURL',
],
}
toolData['names']['gkg'] = {
'original' : [
'GKGRECORDID',
'V21DATE',
'V2SourceCollectionIdentifier',
'V2SourceCommonName',
'V2DocumentIdentifier',
'V1Counts',
'V21Counts',
'V1Themes',
'V2EnhancedThemes',
'V1Locations',
'V2EnhancedLocations',
'V1Persons',
'V2EnhancedPersons',
'V1Organizations',
'V2EnhancedOrganizations',
'V15Tone',
'V21EnhancedDates',
'V2GCAM',
'V21SharingImage',
'V21RelatedImages',
'V21SocialImageEmbeds',
'V21SocialVideoEmbeds',
'V21Quotations',
'V21AllNames',
'V21Amounts',
'V21TranslationInfo',
'V2ExtrasXML',
],
'reduced' : [
'GKGRECORDID',
'V21DATE',
'V2SourceCommonName',
'V2DocumentIdentifier',
'V1Counts',
'V1Themes',
'V1Locations',
'V1Persons',
'V1Organizations',
'V15Tone',
],
}
toolData['names']['mentions'] = {
'original' : [
'GLOBALEVENTID',
'EventTimeDate',
'MentionTimeDate',
'MentionType',
'MentionSourceName',
'MentionIdentifier',
'SentenceID', #
'Actor1CharOffset',#
'Actor2CharOffset',#
'ActionCharOffset',#
'InRawText',
'Confidence',
'MentionDocLen', #
'MentionDocTone',
'MentionDocTranslationInfo', #
'Extras', #
],
'reduced' : [
'GLOBALEVENTID',
'EventTimeDate',
'MentionTimeDate',
'MentionType',
'MentionSourceName',
'MentionIdentifier',
'InRawText',
'Confidence',
'MentionDocTone',
],
}
# These mappings are used in automated dtype application to Pandas
# DataFrame collections of GDELT records, part of preprocessing.
toolData['columnTypes'] = {}
toolData['columnTypes']['events'] = {
'GLOBALEVENTID' : type(1),
'Actor1Code': pd.StringDtype(),
'Actor1Name': pd.StringDtype(),
'Actor1CountryCode': pd.StringDtype(),
'Actor1Type1Code' : pd.StringDtype(),
'Actor1Type2Code' : pd.StringDtype(),
'Actor1Type3Code' : pd.StringDtype(),
'Actor2Code': pd.StringDtype(),
'Actor2Name': pd.StringDtype(),
'Actor2CountryCode': pd.StringDtype(),
import gc
import numpy as np
import pandas as pd
import tables
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from icu_benchmarks.common import constants
def gather_cat_values(common_path, cat_values):
# not too many, so read all of them
df_cat = pd.read_parquet(common_path, columns=list(cat_values))
d = {}
for c in df_cat.columns:
d[c] = [x for x in df_cat[c].unique() if not np.isnan(x)]
return d
def gather_stats_over_dataset(parts, to_standard_scale, to_min_max_scale, train_split_pids, fill_string):
minmax_scaler = MinMaxScaler()
for p in parts:
df_part = impute_df(pd.read_parquet(p, engine='pyarrow', columns=to_min_max_scale + [constants.PID],
filters=[(constants.PID, "in", train_split_pids)]), fill_string=fill_string)
df_part = df_part.replace(np.inf, np.nan).replace(-np.inf, np.nan)
minmax_scaler.partial_fit(df_part[to_min_max_scale])
gc.collect()
means = []
stds = []
# cannot read all to_standard_scale columns in memory, one-by-one would very slow, so read a certain number
# of columns at a time
batch_size = 20
batches = (to_standard_scale[pos:pos + batch_size] for pos in range(0, len(to_standard_scale), batch_size))
for s in batches:
dfs = impute_df(pd.read_parquet(parts[0].parent, engine='pyarrow', columns=[constants.PID] + s,
filters=[(constants.PID, "in", train_split_pids)]),
fill_string=fill_string)
dfs = dfs.replace(np.inf, np.nan).replace(-np.inf, np.nan)
# don't rely on sklearn StandardScaler as partial_fit does not seem to work correctly if in one iteration all values
# of a column are nan (i.e. the then mean becomes nan)
means.extend(dfs[s].mean())
stds.extend(dfs[s].std(ddof=0)) # ddof=0 to be consistent with sklearn StandardScalar
gc.collect()
return (means, stds), minmax_scaler
def to_ml(save_path, parts, labels, features, endpoint_names, df_var_ref, fill_string, split_path=None, random_seed=42):
df_part = pd.read_parquet(parts[0])
data_cols = df_part.columns
common_path = parts[0].parent
df_pid_and_time = pd.read_parquet(common_path, columns=[constants.PID, constants.DATETIME])
# list of patients for every split
split_ids = get_splits(df_pid_and_time, split_path, random_seed)
cat_values, binary_values, to_standard_scale, to_min_max_scale = get_var_types(data_cols, df_var_ref)
cat_vars_levels = gather_cat_values(common_path, cat_values)
(means, stds), minmax_scaler = gather_stats_over_dataset(parts, to_standard_scale, to_min_max_scale,
split_ids['train'], fill_string)
# for every train, val, test split keep how many records have already been written (needed to compute correct window position)
output_offsets = {}
features_available = features
if not features_available:
features = [None] * len(parts)
for p, l, f in zip(parts, labels, features):
df = impute_df(pd.read_parquet(p), fill_string=fill_string)
df_feat = pd.read_parquet(f) if f else pd.DataFrame(columns=[constants.PID])
df_label = pd.read_parquet(l)[
[constants.PID, constants.REL_DATETIME] + list(endpoint_names)]
df_label = df_label.rename(columns={constants.REL_DATETIME: constants.DATETIME})
df_label[constants.DATETIME] = df_label[constants.DATETIME] / 60.0
# align indices between labels df and common df
df_label = df_label.set_index([constants.PID, constants.DATETIME])
df_label = df_label.reindex(index=zip(df[constants.PID].values, df[constants.DATETIME].values))
df_label = df_label.reset_index()
for cat_col in cat_values:
df[cat_col] = pd.Categorical(df[cat_col], cat_vars_levels[cat_col])
"""
Import and export functions for the Chromium Laser Ablation Navigation Software.
"""
import re
import csv
import numpy as np
import pandas as pd
from pathlib import Path
def split_config(s):
"""
Splits a config-formatted string.
Parameters
----------
s : :class:`str`
Returns
---------
:class:`dict`
See Also
---------
:func:`get_scandata`
Todo
-----
Consider using :mod:`configparser` to read .lase files.
"""
x = re.split(r";", s)
d = {k: v for (k, v) in [i.split("=") for i in x]}
return d
def get_scandata(scandict):
"""
Process a dictionary of scan information into a :class:`~pandas.DataFrame`.
Parameters
----------
scandict : :class:`dict`
Dictionary of scan data.
Returns
---------
:class:`pandas.DataFrame`
See Also
---------
:class:`ScanData`
"""
headers = scandict["Header"].split(",")
scannames = [i for i in scandict.keys() if not i == "Header"]
no_scans = len(scannames)
df = pd.DataFrame(columns=headers, index=scannames)
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 22 11:04:26 2018
@author: kcf
"""
#########################################################################################################
# Peptide Class + some functions and support classes
# written by <NAME>
# 11.06.2019
# For question come by and ask or write to <EMAIL>
#########################################################################################################
import re
import numpy as np
import matplotlib.pyplot as plt
import random
import pandas as pd
from math import sqrt
#########################################################################################################
def factorial(n):
"""
n should be an integer with n >= 0.
It calculates n! = n*(n-1)*(n-2)*...*(n-(n-1))
The practical limit is Python's recursion limit; on Win32 this was reached around n=2970.
factorial calls itself with n-1 down to n=1
n!=n*factorial(n-1)
0! = 1 by definition
"""
if type(n)!=int:
print ("only int allowed\n")
return
if n==0:
return 1
if n<0:
print (" Only n>=0 allowed \nPrinting abs(n)!")
n=-n
m=n*factorial(n-1)
return m
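# Examples: factorial(5) = 5*4*3*2*1 = 120; factorial(0) = 1 by definition;
# a negative n prints a warning and returns abs(n)!.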
###########################################################################################################
############################################################################################################
class molecule:
'''
Molecule Class
extract_count() and parse_sf() convert an entered sum formula string (e.g. 'CH4') into element counts, e.g. elements['C'] = 1
calcpeaks() and isopattern() calculate the isotope peak abundances. The code was originally written ~15 years ago in C/C++
and has now been converted to Python
'''
def __init__(self,sumformula='CH3Cl'):
self.elements = {
'C':0,
'H':0,
'N':0,
'O':0,
'Cl':0,
'S':0
}
self.sumformula=sumformula
self.parse_sf(self.sumformula)
self.mw=0.0
self.monomass=0.0
self.isotopepeak=[1.0]*20 # array to safe the isotope pattern
for ele in self.elements:
self.mw+=element_mass_avg[ele]*self.elements[ele]
self.monomass+=element_mass_mono[ele][0]*self.elements[ele]
# self.isopattern() # subroutine to calc the isotope pattern
def info(self):
self.isopattern() # subroutine to calc the isotope pattern
x=np.arange(20)
for i in range (20):
x[i]+=self.monomass
plt.bar(x, height=self.isotopepeak)
plt.xlabel('m/z')
plt.ylabel('rel. Abundance')
plt.title('Isotope pattern')
info='MW {:8.4f}\nMono isotopic Mass {:8.4f}'.format(self.mw, self.monomass)
# plt.savefig('BayesianRidge trainset PCA', dpi=300,bbox_inches='tight', pad_inches=0.2)
plt.gcf().text(1.0,0.75,info, fontsize=12)
plt.show()
return
def extract_count(self, sumformula='CH4',element='C'):
'extracting the number of a certain element in a sum formula'
position=sumformula.find(element)
if len(sumformula)>position+len(element):
if sumformula[position+len(element)].isalpha():
self.elements[element]=1
re.sub(element,'',sumformula)
else:
m=re.findall(element+'(\d+)',sumformula)
self.elements[element]=int(m[0])
re.sub(element+m[0],'',sumformula)
else:
self.elements[element]=1
re.sub(element,'',sumformula)
return sumformula;
def parse_sf(self,sumformula): #double lettered elements on top.
if 'Cl' in sumformula:
sumformula=self.extract_count(sumformula,'Cl')
if 'C' in sumformula:
sumformula=self.extract_count(sumformula,'C')
if 'H' in sumformula:
sumformula=self.extract_count(sumformula,'H')
if 'N' in sumformula:
sumformula=self.extract_count(sumformula,'N')
if 'O' in sumformula:
sumformula=self.extract_count(sumformula,'O')
if 'S' in sumformula:
sumformula=self.extract_count(sumformula,'S')
def calcpeaks(self,array1=[],array2=[]): #subroutine for isotope patter calc
matrix=[[0.0]*20 for i in range(20)]
for i in range (20):
for j in range (20):
if i+j<20:
matrix[i][i+j]=array1[i]*array2[j]
for i in range (20):
array1[i]=0.0
for j in range (20):
array1[i]+=matrix[j][i]
def isopattern(self): #subroutine for isotope patter calc
elementpeaks = {
'C':0,
'H':0,
'N':0,
'O':0,
'Cl':0,
'S':0
}
for ele in self.elements:
elementpeaks[ele]= [0.0]*20
dummy = [0.0]*20
for ele in self.elements:
if self.elements[ele]>0:
for i in range (20):
if (i<4):
elementpeaks[ele][i]=element_ab[ele][i]/element_ab[ele][0]
dummy[i]=element_ab[ele][i]/element_ab[ele][0]
else:
elementpeaks[ele][i]=0.0
dummy[i]=0.0
for i in range(2,self.elements[ele]+1):
self.calcpeaks(elementpeaks[ele],dummy) #subroutine here
for i in range(20):
self.isotopepeak[i]=0.0
self.isotopepeak[0]=1.0
for ele in self.elements:
if self.elements[ele]>0:
self.calcpeaks(self.isotopepeak,elementpeaks[ele]) #subroutine here
maxpeak=0.0 # Finding the max isotope and norm it to 1.0
for i in range(20): #
maxpeak=max(maxpeak,self.isotopepeak[i]) #
for i in range(20): #
self.isotopepeak[i]=self.isotopepeak[i]/maxpeak #
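# Usage sketch (illustrative): molecule('CH3Cl') parses to C=1, H=3, Cl=1, so .mw sums
# the average atomic masses to about 50.49; .info() computes and plots the isotope pattern.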
######################################################################################################################################################
class peptide(molecule):
'''
peptide class skeleton
'''
def __init__(self,sequence='KILIAN', amide=False, acetyl=False, cyclic=False, label='', qda = 0.0, exist=False):
self.sequence = sequence #list of amino acids objects
self.length=len(sequence)
self.elements = {
'C':0,
'H':0,
'N':0,
'O':0,
'Cl':0,
'S':0
}
self.sumformula=''
self.mw=0.0
self.monomass=0.0
self.acetyl=acetyl
self.amide=amide
self.cyclic=cyclic
self.label=label
self.biol = []
self.biolpre = 0.0
self.qda = qda #mass measured
self.exist=exist
self.zscale3=[]
self.zscale5=[]
self.zscale5mat=[]
if self.cyclic==False:
if self.amide==False:
self.elements['O']+=1
self.elements['H']+=1
else:
self.elements['N']+=1
self.elements['H']+=2
if self.acetyl==False:
self.elements['H']+=1
else:
self.elements['O']+=1
self.elements['H']+=3
self.elements['C']+=2
self.parse_sequence(sequence)
self.parse_sf(self.sumformula)
self.isotopepeak=[1.0]*20 # array to safe the isotope pattern
for ele in self.elements:
self.mw+=element_mass_avg[ele]*self.elements[ele]
self.monomass+=element_mass_mono[ele][0]*self.elements[ele]
#self.isopattern() # subroutine to calc the isotope pattern
for aa in self.sequence:
self.zscale5mat.append(aminoacids[aa][2])
for i in range(5):
if i<3:
self.zscale3.append(aminoacids[aa][2][i])
self.zscale5.append(aminoacids[aa][2][i])
def parse_sequence(self,sequence):
for i in range(len(sequence)):
#self.sequence.append(aminoacids[sequence[i]])
for ele in aminoacids[sequence[i]][1]:
self.elements[ele]+=aminoacids[sequence[i]][1][ele]
sum=''
for ele in self.elements:
if self.elements[ele]==0:
continue
else:
sum+=ele
if self.elements[ele]>1:
sum+=str(self.elements[ele])
self.sumformula=sum
def get_zscale(self, number = 0, zscale = 0): #aa number from N-terminal by convention
aa = self.sequence[number]
return (aminoacids[aa][2][zscale])
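# Note: for a linear peptide with free termini (cyclic=False, amide=False, acetyl=False)
# the constructor adds one O and two H on top of the summed residue formulas, i.e. the
# H2O of the free N- and C-termini.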
######################################################################################################################################################
class aminoacid:
'''
amino acid class skeleton
'''
def __init__(self,AA='A'):
self.type=AA
######################################################################################################################################################
element_mass_avg = {
'C':12.011,
'H':1.00794,
'Cl':35.4527,
'O':15.9994,
'N':14.00674,
'S':32.059
}
element_mass_mono = {
'C':[12.0],
'H':[1.007825],
'Cl':[34.968853],
'O':[15.994915],
'N':[14.003074],
'S':[31.972071]
}
element_ab = {
'C':[0.989, 0.011, 0.0, 0.0],
'H':[0.99985, 0.00015, 0.0, 0.0],
'Cl':[0.7577, 0.0, 0.2423, 0.0],
'O':[0.99762, 0.00038, 0.002, 0.0],
'N':[0.99634, 0.00366, 0.0, 0.0],
'S':[0.9502, 0.0075, 0.0421, 0.0002]
}
aminoacids = {
'A':['C3H5NO',{'C':3,'H':5,'N':1,'O':1,'S':0},[0.24,-2.32,0.6,-0.14,1.3]],
'C':['C3H5NOS',{'C':3,'H':5,'N':1,'O':1,'S':1},[0.84,-1.67,3.71,0.18,-2.65]],
'D':['C4H5NO3',{'C':4,'H':5,'N':1,'O':3,'S':0},[3.98,0.93,1.93,-2.46,0.75]],
'E':['C5H7NO3',{'C':5,'H':7,'N':1,'O':3,'S':0},[3.11,0.26,-0.11,-3.04,-0.25]],
'F':['C9H9NO',{'C':9,'H':9,'N':1,'O':1,'S':0},[-4.22,1.94,1.06,0.54,-0.62]],
'G':['C2H3NO',{'C':2,'H':3,'N':1,'O':1,'S':0},[2.05,-4.06,0.36,-0.82,-0.38]],
'H':['C6H7N3O',{'C':6,'H':7,'N':3,'O':1,'S':0},[2.47,1.95,0.26,3.9,0.09]],
'I':['C6H11NO',{'C':6,'H':11,'N':1,'O':1,'S':0},[-3.89,-1.73,-1.71,-0.84,0.26]],
'K':['C6H12N2O',{'C':6,'H':12,'N':2,'O':1,'S':0},[2.29,0.89,-2.49,1.49,0.31]],
'L':['C6H11NO',{'C':6,'H':11,'N':1,'O':1,'S':0},[-4.28,-1.3,-1.49,-0.72, 0.84]],
'M':['C5H9NOS',{'C':5,'H':9,'N':1,'O':1,'S':1},[-2.85,-0.22,0.47,1.94,-0.98]],
'N':['C4H6N2O2',{'C':4,'H':6,'N':2,'O':2,'S':0},[3.05,1.62,1.04,-1.15,1.61]],
'P':['C5H7NO',{'C':5,'H':7,'N':1,'O':1,'S':0},[-1.66,0.27,1.84,0.7,2.0]],
'Q':['C5H8N2O2',{'C':5,'H':8,'N':2,'O':2,'S':0},[1.75,0.5,-1.44,-1.34,0.66]],
'R':['C6H12N4O',{'C':6,'H':12,'N':4,'O':1,'S':0},[3.52,2.5,-3.5,1.99,-0.17]],
'S':['C3H5NO2',{'C':3,'H':5,'N':1,'O':2,'S':0},[2.39,-1.07,1.15,-1.39,0.67]],
'T':['C4H7NO2',{'C':4,'H':7,'N':1,'O':2,'S':0},[0.75,-2.18,-1.12,-1.46,-0.4]],
'V':['C5H9NO',{'C':5,'H':9,'N':1,'O':1,'S':0},[-2.59,-2.64,-1.54,-0.85,-0.02]],
'W':['C11H10N2O',{'C':11,'H':10,'N':2,'O':1,'S':0},[-4.36,3.94,0.59,3.44,-1.59]],
'Y':['C9H9NO2',{'C':9,'H':9,'N':1,'O':2,'S':0},[-2.54,2.44,0.43,0.04,-1.47]],
# 'X':['C4H7NO',{'C':4,'H':7,'N':1,'O':1,'S':0},[-1.33, -2.8, -0.61, -0.55, 0.4]] #Aib
}
####################################################################################################################################################
def ala_scan(peptidelist, pep):
cyclic=pep.cyclic
amide=pep.amide
acetyl=pep.acetyl
for i in range (pep.length):
seq=pep.sequence[:i]+'A'+pep.sequence[i+1:]
newpeptide = peptide(sequence=seq,cyclic=cyclic, amide=amide, acetyl=acetyl)
peptidelist.append(newpeptide)
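# Example (illustrative): ala_scan(lst, peptide('KILIAN')) appends one single-Ala mutant
# per position: AILIAN, KALIAN, KIAIAN, KILAAN, KILIAN (position 4 is already A), KILIAA.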
def con_mut(peptidelist, pep):
#basic amino acids K R H
#aromatic F Y W
#acidic E D
#hydrophilic S T N Q
#hydrophobic F Y W I L V M
#subroutine to change the input sequence pep using conservative mutations
cyclic=pep.cyclic
amide=pep.amide
acetyl=pep.acetyl
for i in range (pep.length):
if pep.sequence[i] in 'KRH':
for aa in 'KRH':
if aa!=pep.sequence[i]:
seq=pep.sequence[:i]+aa+pep.sequence[i+1:]
newpeptide = peptide(sequence=seq,cyclic=cyclic, amide=amide, acetyl=acetyl)
peptidelist.append(newpeptide)
continue
if pep.sequence[i] in 'FYW':
for aa in 'FYW':
if aa!=pep.sequence[i]:
seq=pep.sequence[:i]+aa+pep.sequence[i+1:]
newpeptide = peptide(sequence=seq,cyclic=cyclic, amide=amide, acetyl=acetyl)
peptidelist.append(newpeptide)
continue
if pep.sequence[i] in 'ED':
for aa in 'ED':
if aa!=pep.sequence[i]:
seq=pep.sequence[:i]+aa+pep.sequence[i+1:]
newpeptide = peptide(sequence=seq,cyclic=cyclic, amide=amide, acetyl=acetyl)
peptidelist.append(newpeptide)
continue
if pep.sequence[i] in 'STNQ':
for aa in 'STNQ':
if aa!=pep.sequence[i]:
seq=pep.sequence[:i]+aa+pep.sequence[i+1:]
newpeptide = peptide(sequence=seq,cyclic=cyclic, amide=amide, acetyl=acetyl)
peptidelist.append(newpeptide)
continue
if pep.sequence[i] in 'FYWILVM':
for aa in 'FYWILVM':
if aa!=pep.sequence[i]:
seq=pep.sequence[:i]+aa+pep.sequence[i+1:]
newpeptide = peptide(sequence=seq,cyclic=cyclic, amide=amide, acetyl=acetyl)
peptidelist.append(newpeptide)
continue
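# Example (illustrative): a K at a given position yields variants with R and H there;
# an I yields F, Y, W, L, V and M, since only the hydrophobic group matches it.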
def charge_scan(peptidelist, pep, basic='K',acid='E'):
cyclic=pep.cyclic
amide=pep.amide
acetyl=pep.acetyl
for i in range (pep.length):
seq=pep.sequence[:i]+basic+pep.sequence[i+1:]
newpeptide = peptide(sequence=seq,cyclic=cyclic, amide=amide, acetyl=acetyl)
peptidelist.append(newpeptide)
seq=pep.sequence[:i]+acid+pep.sequence[i+1:]
newpeptide = peptide(sequence=seq,cyclic=cyclic, amide=amide, acetyl=acetyl)
peptidelist.append(newpeptide)
def saltbridge_scan(peptidelist, pep, basic='K',acid='E', ra=0, dis=4):
if ra==0:
ra=pep.length
cyclic=pep.cyclic
amide=pep.amide
acetyl=pep.acetyl
for i in range (ra-dis):
seq=pep.sequence[:i]+basic+pep.sequence[i+1:]
seq=seq[:i+dis]+acid+seq[i+dis+1:]
newpeptide = peptide(sequence=seq,cyclic=cyclic, amide=amide, acetyl=acetyl)
peptidelist.append(newpeptide)
seq=pep.sequence[:i]+acid+pep.sequence[i+1:]
seq=seq[:i+dis]+basic+seq[i+dis+1:]
newpeptide = peptide(sequence=seq,cyclic=cyclic, amide=amide, acetyl=acetyl)
peptidelist.append(newpeptide)
def random_walk(peptidelist, pep, n=2, m=10, exclude_C=True): #n= number of mutations, m=number of peptides
cyclic=pep.cyclic
amide=pep.amide
acetyl=pep.acetyl
for i in range(m):
for j in range(n):
mut=random.randint(0,len(pep.sequence)-1) #pythons randint goes from x to z both included, numpy excludes z
possible_aa = list(aminoacids.keys())
if exclude_C == True:
possible_aa.remove('C')
random_aa = random.choice(possible_aa)
#print (possible_aa)
seq=pep.sequence[:mut]+random_aa+pep.sequence[mut+1:]
newpeptide = peptide(sequence=seq,cyclic=cyclic, amide=amide, acetyl=acetyl)
peptidelist.append(newpeptide)
def random_walk2(peptidelist, pep, n=2, m=10, exclude_C=True, old_pep_dict={}):
#n= number of mutations, m=number of peptides
# this routine checks for duplicates and creates a list with m unique sequences
# neither this nor random_walk is working properly
# seems to work now, 12.Nov. 2019
# will also check whether the new peptides already exist in a dict
# now with up n mutation, adding a randint(1,n) statement
cyclic=pep.cyclic
amide=pep.amide
acetyl=pep.acetyl
listofstrings = {}
listofstrings[pep.sequence]=1
while len(listofstrings)<m+1:
seq=pep.sequence
number_of_mut=random.randint(1,n)
for j in range(number_of_mut):
mut=random.randint(0,len(pep.sequence)-1) #pythons randint goes from x to z both included, numpy excludes z
possible_aa = list(aminoacids.keys())
if exclude_C == True:
possible_aa.remove('C')
random_aa = random.choice(possible_aa)
#print (pep.sequence[:mut]+'\x1b[6;30;42m'+random_aa+'\x1b[0m'+pep.sequence[mut+1:])
seq=seq[:mut]+random_aa+seq[mut+1:]
if seq in old_pep_dict.keys():
continue
listofstrings[seq]=1
listofstrings.pop(pep.sequence)
for i in listofstrings:
newpeptide = peptide(sequence=i,cyclic=cyclic, amide=amide, acetyl=acetyl)
peptidelist.append(newpeptide)
#this routine checks for duplicates and creates a list with m unique sequences out of the top x peptides
def random_walk3(peptidedict, top=10, n=2, m=10, exclude_C=True, feat=0):
possible_aa = list(aminoacids.keys())
if exclude_C == True:
possible_aa.remove('C')
peptidelist=[]
for peptides in peptidedict:
peptidelist.append(peptidedict[peptides])
peptidelist.sort(key=lambda x: x.biol[feat], reverse=False)
for i in range (top):
k=0
cyclic=peptidelist[i].cyclic
amide=peptidelist[i].amide
acetyl=peptidelist[i].acetyl
while k <m:
seq=peptidelist[i].sequence
for j in range(n):
mut=random.randint(0,len(seq)-1) #pythons randint goes from x to z both included, numpy excludes z
random_aa = random.choice(possible_aa)
seq=seq[:mut]+random_aa+seq[mut+1:]
if seq in peptidedict:
continue
else:
k=k+1
newpeptide=peptide(sequence=seq,cyclic=cyclic, amide=amide, acetyl=acetyl)
peptidedict[seq]=newpeptide
return
def write_lib(peplist, filename='test.txt', row1='.space', row2='.space', n=80):
seq_list= []
for k in range(n//80):
seq_list.append('; Plate '+str(k+1))
for i in range (8):
seq_list.append(row1)
seq_list.append(row2)
for j in range (10):
blanked_seq=''.join(peplist[i*10+j+k*80].sequence[m]+' 'for m in range(len(peplist[i*10+j+k*80].sequence)-1))
blanked_seq=blanked_seq+peplist[i*10+j+k*80].sequence[len(peplist[i*10+j+k*80].sequence)-1]
seq_list.append(blanked_seq)
newfile= pd.DataFrame(data=seq_list)
import numpy as np
import os
import sys
import types
import pywt
import pandas as pd
import numpy as np
#import matplotlib.pyplot as plt
#import requests, io
from scipy.io import wavfile
import mywavfile
#from features import mfcc, logfbank
#https://python-speech-features.readthedocs.io/en/latest/
from python_speech_features import mfcc
from python_speech_features import logfbank
from numpy import savetxt #jonas test
import write_op as wo
import time
start = time.time()
save_individual_results = True
save_summary_result = True
from collections.abc import Iterable
def flatten(lis):
'''convert nested list into one dimensional list'''
for item in lis:
if isinstance(item, Iterable) and not isinstance(item, str):
for x in flatten(item):
yield x
else:
yield item
def spectral_centroid(x, samplerate):
''' source: https://stackoverflow.com/questions/24354279/python-spectral-centroid-for-a-wav-file'''
magnitudes = np.abs(np.fft.rfft(x)) # magnitudes of positive frequencies
length = len(x)
freqs = np.abs(np.fft.fftfreq(length, 1.0/samplerate)[:length//2+1]) # positive frequencies
return np.sum(magnitudes*freqs) / np.sum(magnitudes) # return weighted mean
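# Note: this is the magnitude-weighted mean frequency; a pure tone gives roughly that
# tone's frequency, while broadband content pulls the centroid toward mid-spectrum.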
def pad(array, reference_shape, offsets=None):
"""
array: Array to be padded
reference_shape: tuple of size of narray to create
offsets: list of offsets (number of elements must be equal to the dimension of the array)
will throw a ValueError if offsets is too big and the reference_shape cannot handle the offsets
"""
if not offsets:
offsets = np.zeros(array.ndim, dtype=np.int32)
#offsets = np.zeros(array.ndim, dtype=object)
# Create an array of zeros with the reference shape
result = np.zeros(reference_shape, dtype=np.float32)
#result = np.zeros(reference_shape, dtype=object)
# Create a list of slices from offset to offset + shape in each dimension
insertHere = [slice(offsets[dim], offsets[dim] + array.shape[dim]) for dim in range(array.ndim)]
# Insert the array in the result at the specified offsets
result[insertHere] = array
return result
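# Example (illustrative): pad(np.ones((2, 2)), (4, 4)) returns a 4x4 float32 array with
# the 2x2 block of ones in the top-left corner and zeros elsewhere (default offsets are 0).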
#function to get the audio files
def get_audio_files(ip_dir):
matches = []
for root, dirnames, filenames in os.walk(ip_dir):
for filename in filenames:
if filename.lower().endswith('.wav'):
matches.append(os.path.join(root, filename))
return matches
def read_audio(file_name):
# try to read in audio file
try:
#samp_rate_orig, audio = mywavfile.read(file_name)
samp_rate_orig, audio = wavfile.read(file_name)
#coeff, freqs = pywt.cwt(file_name, scales, 'morl')
except:
print(' Error reading file')
return True, None, None, None, None
# convert to mono if stereo
if len(audio.shape) == 2:
print(' Warning: stereo file. Just taking left channel.')
audio = audio[:, 0]
file_dur = audio.shape[0] / float(samp_rate_orig)
print(' dur', round(file_dur,3), '(secs) , fs', samp_rate_orig)
# original model is trained on time expanded data
samp_rate = samp_rate_orig
#audio = audio / (2.**15) #normalize the values
#source: https://github.com/JonasMok/Python-Machine-Learning-Cookbook/blob/master/Chapter07/extract_mfcc.py
#mfcc_features = mfcc(audio, samp_rate_orig)
#filterbank_features = logfbank(audio, samp_rate_orig)
#len_mfcc = mfcc_features.shape
#len_filter = filterbank_features.shape
spec_centroid = spectral_centroid(audio, samp_rate_orig)
#return False, audio, file_dur, samp_rate, samp_rate_orig, mfcc_features, filterbank_features, len_filter, len_mfcc, spec_centroid
return False, audio, file_dur, samp_rate, samp_rate_orig, spec_centroid
#return False, audio, file_dur, samp_rate, samp_rate_orig, spec_centroid
#--------------------------------------------------------------------------------------------------------------------------------------
def printar (audio_files, data_dir):
print('Processing ', len(audio_files), 'files')
print('Input directory ', data_dir)
# loop through audio files
def resultado(audio_files,data_dir):
results = []
for file_cnt, file_name in enumerate(audio_files):
file_name_basename = file_name[len(data_dir)+1:]
print('\n', file_cnt+1, 'of', len(audio_files), '\t', file_name_basename)
#read_fail, audio, file_dur, samp_rate, samp_rate_orig, spec_centroid = read_audio(file_name)
read_fail, audio, file_dur, samp_rate, samp_rate_orig, spec_centroid = read_audio(file_name)
if read_fail:
continue
#res = {'label': data_dir,'filename':file_name_basename, 'sample_rate':samp_rate_orig, 'spec_centroid':spec_centroid} #'coefficient':coeff, 'frequency':freqs,
#res = {'label': data_dir,'filename':file_name_basename, 'sample_rate':samp_rate_orig, 'mfcc': mfcc_features, 'filterbank':filterbank_features, 'len_mfcc':len_mfcc, 'len_filter': len_filter, 'spec_centroid':spec_centroid}
res = {'label': data_dir,'filename':file_name_basename, 'sample_rate':samp_rate_orig, 'spec_centroid':spec_centroid}
#res = {'data_dir':data_dir,'filename':file_name_basename, 'sample_rate':samp_rate_orig}
results.append(res)
return results
#-----------------------------------------------------------------------------------------------------------------------------------------------------------
# this is the path to your audio files
#data_dir = 'test'
#data_dir2 = 'test_2'
#data_dir3='test_3'
data_dir_control = 'Control 0006246'
data_dir_ground_1 = 'T1 Ground 0006199'
data_dir_nacelle_1 = 'T1 Nacelle 0006325'
data_dir_ground_5 = 'T5 Ground 0006323'
data_dir_nacelle_5 = 'T5 Nacelle take 2'
data_dir_ground_9 = 'T9 Ground 0006331'
data_dir_nacelle_9 = 'T9 Nacelle 0006364'
#audio_files = get_audio_files(data_dir_control)
#printar(audio_files, data_dir_control)
#result_1=resultado(audio_files,data_dir_control)
#audio_files_2 = get_audio_files(data_dir_ground_1)
#printar(audio_files_2, data_dir_ground_1)
#result_2=resultado(audio_files_2,data_dir_ground_1)
#audio_files_3 = get_audio_files(data_dir_nacelle_1)
#printar(audio_files_3, data_dir_nacelle_1)
#result_3=resultado(audio_files_3,data_dir_nacelle_1)
#audio_files_4 = get_audio_files(data_dir_ground_5)
#printar(audio_files_4, data_dir_ground_5)
#result_4=resultado(audio_files_4,data_dir_ground_5)
#audio_files_5 = get_audio_files(data_dir_nacelle_5)
#printar(audio_files_5, data_dir_nacelle_5)
#result_5=resultado(audio_files_5,data_dir_nacelle_5)
#audio_files_6 = get_audio_files(data_dir_ground_9)
#printar(audio_files_6, data_dir_ground_9)
#result_6=resultado(audio_files_6,data_dir_ground_9)
audio_files_7 = get_audio_files(data_dir_nacelle_9)
printar(audio_files_7, data_dir_nacelle_9)
result_7=resultado(audio_files_7,data_dir_nacelle_9)
#------------------------------------------------------------------------------------------
dt = pd.DataFrame(result_7)
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
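# Note: records_arr holds three records for each of columns 'a', 'b' and 'c' and none
# for 'd', so per-column record counts come out as [3, 3, 3, 0] and, under group_by
# (g1 = a+b, g2 = c+d), as [6, 3] in the tests below.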
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
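# map_to_mask() builds a boolean mask via a Numba function that writes into an
# output buffer per column; the function below flags every second record of a column.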
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
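# to_pd() pivots the mapped values back into the wrapper's shape, leaving NaN (or
# fill_value) where a column has no records; the extra record added further below
# collides with an existing (col, idx) slot, which is why to_pd() raises there.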
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
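# apply() runs a Numba function over each column's values, or over each group's
# concatenated values when apply_per_group=True (see the cumulative sums below).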
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
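# reduce() aggregates each column with a Numba reducer taking (col, a); empty
# columns (here 'd') come out as NaN unless fill_value is given, and wrap_kwargs
# is forwarded to the wrapper.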
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
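# returns_idx=True translates the reduced positions into wrapper.index labels
# ('x', 'y', 'z'); to_index=False keeps the raw positions, with -1 for empty columns.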
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
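# returns_array=True lets the reducer emit an array per column; results are wrapped
# into a DataFrame (or a Series for a single column/group), honoring fill_value,
# to_timedelta, and grouping just like the scalar reduce above.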
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth_index(0) == 'x'
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth_index(-1) == 'z'
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)