prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90) |
---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 8 01:59:54 2020
@author: iagorosa
"""
#%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.model_selection import train_test_split
from scipy import stats
from sklearn.preprocessing import MinMaxScaler
from sklearn_extensions.extreme_learning_machines.elm import ELMRegressor
import scipy.stats as scs
from statsmodels.stats.diagnostic import lilliefors
import pylab as pl
import seaborn as sns
#%%
dados= | pd.read_csv('housing.csv',sep=',',header=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if it's a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape // arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# _get_numeric_data includes _get_bool_data, so we can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
# numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
for op in ['min', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',
'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test downcasting of values that are close to integers
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise not-implementederror
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
# (currently there is a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
# Check for stability when given a seed or a RandomState -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `n`
###
# Giving both frac and n raises an error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that the right error is raised for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
self.assertTrue(len(o.sample(n=4)) == 4)
self.assertTrue(len(o.sample(frac=0.34)) == 3)
self.assertTrue(len(o.sample(frac=0.36)) == 4)
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
# A few DataFrame tests with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
# Check that weights that don't sum to one are re-normalized.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with tm.assertRaises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values that will be dropped because they are not in
# the sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
# Weights have missing entries to be filled with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with tm.assertRaises(ValueError):
df.sample(1, weights=s4)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10 ** len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o, 5)) == 5)
self.assertTrue(len(np.array_split(o, 2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
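# Illustration (a minimal module-level sketch, not part of the original suite): the checks in
# test_unexpected_keyword rely on ordinary Python behaviour -- a keyword argument a method
# does not declare raises TypeError whose message mentions "unexpected keyword".
try:
    pd.DataFrame({"jim": [1], "joe": [2]}).drop("joe", axis=1, in_place=True)  # typo for inplace=
except TypeError as err:
    assert "unexpected keyword" in str(err)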
class TestSeries(tm.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setUp(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
self.assertTrue(s.bool())
s = Series([False])
self.assertFalse(s.bool())
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
self.assertRaises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
self.assertRaises(ValueError, lambda: s.bool())
# multiple bools are still an error
for s in [Series([True, True]), Series([False, False])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
# a single non-bool is an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = 'foo'
o2 = Series(range(3), range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101', periods=1000, freq='s'),
name='foo')
result = ts.resample('1T').mean()
self.check_metadata(ts, result)
result = ts.resample('1T').min()
self.check_metadata(ts, result)
result = ts.resample('1T').apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name', 'filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
self.assertEqual(result.filename, 'foo+bar')
self.assertIsNone(result.name)
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_numpy_array_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_numpy_array_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interp_regression(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
self.assertRaises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
self.assertRaises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
# These tests are for issue #10420 -- flow back to the beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
# These tests are for issue #11115 -- limit the ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
with | tm.assertRaises(ValueError) | pandas.util.testing.assertRaises |
import pytest
# relative import
# NOTE: run via cmd: pytest package/tests/ (in project root dir)
from .. import functions
# alternate import
# NOTE: run via cmd: pytest . (in tests dir)
#import sys
#sys.path.append("..")
#import functions
import pandas as pd
from typing import List
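# Sketch (hypothetical): the `functions` module under test is not included in this snippet.
# An implementation consistent with the assertions below could look like the helpers here;
# the _sketch_* names are illustrative only and are not part of the real module.
def _sketch_add(x: int, y: int) -> int:
    return x + y
def _sketch_sub(x: int, y: int) -> int:
    return x - y
def _sketch_create_df(a: List[int], b: List[int]) -> pd.DataFrame:
    return pd.DataFrame({"A": a, "B": b})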
def test_add():
input_x: int = 2
input_y: int = 2
output: int = 4
actual = functions.add(input_x, input_y)
assert output == actual
def test_sub():
input_x: int = 10
input_y: int = 5
output: int = 5
actual = functions.sub(input_x, input_y)
assert output == actual
def test_add_type():
input_x: int = 10
input_y: int = 5
actual = functions.sub(input_x, input_y)
assert isinstance(actual, int)
def test_create_df():
input_a: List[int] = [1,2,3]
input_b: List[int] = [4,5,6]
output: pd.DataFrame = pd.DataFrame(
{
"A" : [1,2,3],
"B" : [4,5,6],
}
)
actual = functions.create_df(input_a, input_b)
assert | pd.DataFrame.equals(output, actual) | pandas.DataFrame.equals |
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal, assert_series_equal
from woodwork.logical_types import (
Boolean,
Categorical,
Double,
Integer,
NaturalLanguage,
)
from evalml.pipelines.components import TargetImputer
def test_target_imputer_no_y(X_y_binary):
X, y = X_y_binary
imputer = TargetImputer()
assert imputer.fit_transform(None, None) == (None, None)
imputer = TargetImputer()
imputer.fit(None, None)
assert imputer.transform(None, None) == (None, None)
def test_target_imputer_with_X():
X = pd.DataFrame({"some col": [1, 3, np.nan]})
y = pd.Series([np.nan, 1, 3])
imputer = TargetImputer(impute_strategy="median")
y_expected = pd.Series([2, 1, 3])
X_expected = pd.DataFrame({"some col": [1, 3, np.nan]})
X_t, y_t = imputer.fit_transform(X, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
assert_frame_equal(X_expected, X_t, check_dtype=False)
def test_target_imputer_median():
y = pd.Series([np.nan, 1, 10, 10, 6])
imputer = TargetImputer(impute_strategy="median")
y_expected = pd.Series([8, 1, 10, 10, 6])
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
def test_target_imputer_mean():
y = pd.Series([np.nan, 2, 0])
imputer = TargetImputer(impute_strategy="mean")
y_expected = pd.Series([1, 2, 0])
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
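# Illustration (plain pandas, not the TargetImputer API): mean imputation of the target is
# equivalent to filling missing values with the column mean, matching y_expected above.
_y_demo = pd.Series([np.nan, 2, 0])
_y_filled = _y_demo.fillna(_y_demo.mean())  # -> [1.0, 2.0, 0.0]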
@pytest.mark.parametrize(
"fill_value, y, y_expected",
[
(None, pd.Series([np.nan, 0, 5]), pd.Series([0, 0, 5])),
(
None,
pd.Series([np.nan, "a", "b"]),
pd.Series(["missing_value", "a", "b"]).astype("category"),
),
(3, pd.Series([np.nan, 0, 5]), pd.Series([3, 0, 5])),
(3, pd.Series([np.nan, "a", "b"]), pd.Series([3, "a", "b"]).astype("category")),
],
)
def test_target_imputer_constant(fill_value, y, y_expected):
imputer = TargetImputer(impute_strategy="constant", fill_value=fill_value)
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
def test_target_imputer_most_frequent():
y = pd.Series([np.nan, "a", "b"])
imputer = TargetImputer(impute_strategy="most_frequent")
y_expected = pd.Series(["a", "a", "b"]).astype("category")
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
y = pd.Series([np.nan, 1, 1, 2])
imputer = TargetImputer(impute_strategy="most_frequent")
y_expected = pd.Series([1, 1, 1, 2])
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
def test_target_imputer_col_with_non_numeric_with_numeric_strategy():
y = pd.Series([np.nan, "a", "b"])
imputer = TargetImputer(impute_strategy="mean")
with pytest.raises(
ValueError, match="Cannot use mean strategy with non-numeric data"
):
imputer.fit_transform(None, y)
with pytest.raises(
ValueError, match="Cannot use mean strategy with non-numeric data"
):
imputer.fit(None, y)
imputer = TargetImputer(impute_strategy="median")
with pytest.raises(
ValueError, match="Cannot use median strategy with non-numeric data"
):
imputer.fit_transform(None, y)
with pytest.raises(
ValueError, match="Cannot use median strategy with non-numeric data"
):
imputer.fit(None, y)
@pytest.mark.parametrize("data_type", ["pd", "ww"])
def test_target_imputer_all_bool_return_original(data_type, make_data_type):
y = pd.Series([True, True, False, True, True], dtype=bool)
y = make_data_type(data_type, y)
y_expected = pd.Series([True, True, False, True, True], dtype=bool)
imputer = TargetImputer()
imputer.fit(None, y)
_, y_t = imputer.transform(None, y)
assert_series_equal(y_expected, y_t)
@pytest.mark.parametrize("data_type", ["pd", "ww"])
def test_target_imputer_boolean_dtype(data_type, make_data_type):
y = pd.Series([True, np.nan, False, np.nan, True], dtype="category")
y_expected = pd.Series([True, True, False, True, True], dtype="category")
y = make_data_type(data_type, y)
imputer = TargetImputer()
imputer.fit(None, y)
_, y_t = imputer.transform(None, y)
assert_series_equal(y_expected, y_t)
def test_target_imputer_fit_transform_all_nan_empty():
y = pd.Series([np.nan, np.nan])
imputer = TargetImputer()
imputer.fit(None, y)
with pytest.raises(RuntimeError, match="Transformed data is empty"):
imputer.transform(None, y)
imputer = TargetImputer()
with pytest.raises(RuntimeError, match="Transformed data is empty"):
imputer.fit_transform(None, y)
def test_target_imputer_numpy_input():
y = np.array([np.nan, 0, 2])
imputer = TargetImputer(impute_strategy="mean")
y_expected = np.array([1, 0, 2])
_, y_t = imputer.fit_transform(None, y)
assert np.allclose(y_expected, y_t)
np.testing.assert_almost_equal(y, np.array([np.nan, 0, 2]))
def test_target_imputer_does_not_reset_index():
y = pd.Series(np.arange(10))
y[5] = np.nan
assert y.index.tolist() == list(range(10))
y.drop(0, inplace=True)
pd.testing.assert_series_equal(
pd.Series(
[1, 2, 3, 4, np.nan, 6, 7, 8, 9], dtype=float, index=list(range(1, 10))
),
y,
)
imputer = TargetImputer(impute_strategy="mean")
imputer.fit(None, y=y)
_, y_t = imputer.transform(None, y)
pd.testing.assert_series_equal(
pd.Series([1.0, 2, 3, 4, 5, 6, 7, 8, 9], dtype=float, index=list(range(1, 10))),
y_t,
)
@pytest.mark.parametrize(
"y, y_expected",
[
(pd.Series([1, 0, 5, None]), pd.Series([1, 0, 5, 2])),
(pd.Series([0.1, 0.0, 0.5, None]), pd.Series([0.1, 0.0, 0.5, 0.2])),
],
)
def test_target_imputer_with_none(y, y_expected):
imputer = TargetImputer(impute_strategy="mean")
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
@pytest.mark.parametrize(
"y, y_expected",
[
(
pd.Series(["b", "a", "a", None], dtype="category"),
pd.Series(["b", "a", "a", "a"], dtype="category"),
),
(
pd.Series([True, None, False, True], dtype="category"),
pd.Series([True, True, False, True], dtype="category"),
),
(
| pd.Series(["b", "a", "a", None]) | pandas.Series |
"""
This module implements the intermediates computation for plot(df) function.
""" # pylint: disable=too-many-lines
from collections import defaultdict
from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Union, cast
import dask
import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
from dask.array.stats import kurtosis, skew
from nltk.stem import PorterStemmer, WordNetLemmatizer
from scipy.stats import gaussian_kde
from ...assets.english_stopwords import english_stopwords
from ...errors import UnreachableError
from ..dtypes import (
Continuous,
DateTime,
DType,
DTypeDef,
Nominal,
detect_dtype,
drop_null,
is_dtype,
)
from ..intermediate import Intermediate
from ..utils import to_dask
__all__ = ["compute"]
# Dictionary for mapping the time unit to its formatting. Each entry is of the
# form unit:(unit code for pd.Grouper freq parameter, pandas to_period strftime
# formatting for line charts, pandas to_period strftime formatting for box plot,
# label format).
DTMAP = {
"year": ("Y", "%Y", "%Y", "Year"),
"quarter": ("Q", "Q%q %Y", "Q%q %Y", "Quarter"),
"month": ("M", "%B %Y", "%b %Y", "Month"),
"week": ("W-SAT", "%d %B, %Y", "%d %b, %Y", "Week of"),
"day": ("D", "%d %B, %Y", "%d %b, %Y", "Date"),
"hour": ("H", "%d %B, %Y, %I %p", "%d %b, %Y, %I %p", "Hour"),
"minute": ("T", "%d %B, %Y, %I:%M %p", "%d %b, %Y, %I:%M %p", "Minute"),
"second": ("S", "%d %B, %Y, %I:%M:%S %p", "%d %b, %Y, %I:%M:%S %p", "Second"),
}
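# Illustration (a minimal sketch, not part of this module) of how one DTMAP entry is consumed:
# the first element feeds pd.Grouper(freq=...) and the strftime patterns format the labels.
# Names prefixed with an underscore are used only for this example.
_demo_dt = pd.DataFrame({"ts": pd.to_datetime(["2020-01-05", "2020-02-07"]), "v": [1, 2]})
_freq, _line_fmt, _box_fmt, _label = DTMAP["month"]  # ("M", "%B %Y", "%b %Y", "Month")
_cnts = _demo_dt.groupby(pd.Grouper(key="ts", freq=_freq)).size().reset_index(name="freq")
_cnts["lbl"] = _cnts["ts"].dt.to_period(_freq).dt.strftime(_line_fmt)  # e.g. "January 2020"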
def compute(
df: Union[pd.DataFrame, dd.DataFrame],
x: Optional[str] = None,
y: Optional[str] = None,
z: Optional[str] = None,
*,
bins: int = 10,
ngroups: int = 10,
largest: bool = True,
nsubgroups: int = 5,
timeunit: str = "auto",
agg: str = "mean",
sample_size: int = 1000,
top_words: int = 30,
stopword: bool = True,
lemmatize: bool = False,
stem: bool = False,
value_range: Optional[Tuple[float, float]] = None,
dtype: Optional[DTypeDef] = None,
) -> Intermediate:
"""
Parameters
----------
df
Dataframe from which plots are to be generated
x: Optional[str], default None
A valid column name from the dataframe
y: Optional[str], default None
A valid column name from the dataframe
z: Optional[str], default None
A valid column name from the dataframe
bins: int, default 10
For a histogram or box plot with numerical x axis, it defines
the number of equal-width bins to use when grouping.
ngroups: int, default 10
When grouping over a categorical column, it defines the
number of groups to show in the plot. Ie, the number of
bars to show in a bar chart.
largest: bool, default True
If true, when grouping over a categorical column, the groups
with the largest count will be output. If false, the groups
with the smallest count will be output.
nsubgroups: int, default 5
If x and y are categorical columns, ngroups refers to
how many groups to show from column x, and nsubgroups refers to
how many subgroups to show from column y in each group in column x.
timeunit: str, default "auto"
Defines the time unit to group values over for a datetime column.
It can be "year", "quarter", "month", "week", "day", "hour",
"minute", "second". With default value "auto", it will use the
time unit such that the resulting number of groups is closest to 15.
agg: str, default "mean"
Specify the aggregate to use when aggregating over a numeric column
sample_size: int, default 1000
Sample size for the scatter plot
top_words: int, default 30
Specify the amount of words to show in the wordcloud and
word frequency bar chart
stopword: bool, default True
Eliminate the stopwords in the text data for plotting wordcloud and
word frequency bar chart
lemmatize: bool, default False
Lemmatize the words in the text data for plotting wordcloud and
word frequency bar chart
stem: bool, default False
Apply Porter stemming to the text data for plotting wordcloud and
word frequency bar chart
value_range: Optional[Tuple[float, float]], default None
The lower and upper bounds on the range of a numerical column.
Applies when column x is specified and column y is unspecified.
dtype: str or DType or dict of str or dict of DType, default None
Specify Data Types for designated column or all columns.
E.g. dtype = {"a": Continuous, "b": "Nominal"} or
dtype = {"a": Continuous(), "b": "nominal"}
or dtype = Continuous() or dtype = "Continuous" or dtype = Continuous()
""" # pylint: disable=too-many-locals
df = to_dask(df)
if not any((x, y, z)):
return compute_overview(df, bins, ngroups, largest, timeunit, dtype)
if sum(v is None for v in (x, y, z)) == 2:
col: str = cast(str, x or y or z)
return compute_univariate(
df,
col,
bins,
ngroups,
largest,
timeunit,
top_words,
stopword,
lemmatize,
stem,
value_range,
dtype,
)
if sum(v is None for v in (x, y, z)) == 1:
x, y = (v for v in (x, y, z) if v is not None)
return compute_bivariate(
df,
x,
y,
bins,
ngroups,
largest,
nsubgroups,
timeunit,
agg,
sample_size,
dtype,
)
if x is not None and y is not None and z is not None:
return compute_trivariate(df, x, y, z, ngroups, largest, timeunit, agg, dtype)
return Intermediate()
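# Illustration of the dispatch rule above (not library code): the number of unspecified
# columns among x, y, z selects the overview, univariate, bivariate, or trivariate path.
_x, _y, _z = "colA", None, None
assert sum(v is None for v in (_x, _y, _z)) == 2  # -> compute_univariate branch
_x, _y = "colA", "colB"
assert sum(v is None for v in (_x, _y, _z)) == 1  # -> compute_bivariate branch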
def compute_overview(
df: dd.DataFrame,
bins: int,
ngroups: int,
largest: bool,
timeunit: str,
dtype: Optional[DTypeDef] = None,
) -> Intermediate:
# pylint: disable=too-many-arguments,too-many-locals
"""
Compute functions for plot(df)
Parameters
----------
df
Dataframe from which plots are to be generated
bins
For a histogram or box plot with numerical x axis, it defines
the number of equal-width bins to use when grouping.
ngroups
When grouping over a categorical column, it defines the
number of groups to show in the plot. Ie, the number of
bars to show in a bar chart.
largest
If true, when grouping over a categorical column, the groups
with the largest count will be output. If false, the groups
with the smallest count will be output.
timeunit
Defines the time unit to group values over for a datetime column.
It can be "year", "quarter", "month", "week", "day", "hour",
"minute", "second". With default value "auto", it will use the
time unit such that the resulting number of groups is closest to 15.
dtype: str or DType or dict of str or dict of DType, default None
Specify Data Types for designated column or all columns.
E.g. dtype = {"a": Continuous, "b": "Nominal"} or
dtype = {"a": Continuous(), "b": "nominal"}
or dtype = Continuous() or dtype = "Continuous" or dtype = Continuous()
"""
# extract the first rows for checking if a column contains a mutable type
first_rows: pd.DataFrame = df.head() # dd.DataFrame.head triggers a (small) data read
datas: List[Any] = []
dtype_cnts: DefaultDict[str, int] = defaultdict(int)
col_names_dtypes: List[Tuple[str, DType]] = []
for column in df.columns:
srs = df[column]
column_dtype = detect_dtype(srs, dtype)
if is_dtype(column_dtype, Nominal()):
# cast the column as string type if it contains a mutable type
try:
first_rows[column].apply(hash)
except TypeError:
srs = df[column] = srs.dropna().astype(str)
# bar chart
datas.append(calc_bar(srs, ngroups, largest))
col_names_dtypes.append((column, Nominal()))
dtype_cnts["Categorical"] += 1
elif is_dtype(column_dtype, Continuous()):
# histogram
hist = da.histogram(drop_null(srs), bins=bins, range=[srs.min(), srs.max()])
datas.append(hist)
col_names_dtypes.append((column, Continuous()))
dtype_cnts["Numerical"] += 1
elif is_dtype(column_dtype, DateTime()):
datas.append(dask.delayed(calc_line_dt)(df[[column]], timeunit))
col_names_dtypes.append((column, DateTime()))
dtype_cnts["DateTime"] += 1
else:
raise UnreachableError
stats = calc_stats(df, dtype_cnts)
datas, stats = dask.compute(datas, stats)
data = [(col, dtp, dat) for (col, dtp), dat in zip(col_names_dtypes, datas)]
return Intermediate(data=data, stats=stats, visual_type="distribution_grid",)
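# Illustration (plain pandas, not library code) of the hashability probe used above:
# unhashable cell values such as lists make hash() raise TypeError, and the column is then
# cast to str so that it can be grouped.
_probe = pd.DataFrame({"ok": ["a", "b"], "bad": [[1], [2]]})
try:
    _probe["bad"].apply(hash)
except TypeError:
    _probe["bad"] = _probe["bad"].astype(str)  # same fallback as in compute_overview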
def compute_univariate(
df: dd.DataFrame,
x: str,
bins: int,
ngroups: int,
largest: bool,
timeunit: str,
top_words: int,
stopword: bool = True,
lemmatize: bool = False,
stem: bool = False,
value_range: Optional[Tuple[float, float]] = None,
dtype: Optional[DTypeDef] = None,
) -> Intermediate:
"""
Compute functions for plot(df, x)
Parameters
----------
df
Dataframe from which plots are to be generated
x
A valid column name from the dataframe
bins
For a histogram or box plot with numerical x axis, it defines
the number of equal-width bins to use when grouping.
ngroups
When grouping over a categorical column, it defines the
number of groups to show in the plot. Ie, the number of
bars to show in a bar chart.
largest
If true, when grouping over a categorical column, the groups
with the largest count will be output. If false, the groups
with the smallest count will be output.
timeunit
Defines the time unit to group values over for a datetime column.
It can be "year", "quarter", "month", "week", "day", "hour",
"minute", "second". With default value "auto", it will use the
time unit such that the resulting number of groups is closest to 15.
top_words: int, default 30
Specify the amount of words to show in the wordcloud and
word frequency bar chart
stopword: bool, default True
Eliminate the stopwords in the text data for plotting wordcloud and
word frequency bar chart
lemmatize: bool, default False
Lemmatize the words in the text data for plotting wordcloud and
word frequency bar chart
stem: bool, default False
Apply Porter stemming to the text data for plotting wordcloud and
word frequency bar chart
value_range
The lower and upper bounds on the range of a numerical column.
Applies when column x is specified and column y is unspecified.
dtype: str or DType or dict of str or dict of DType, default None
Specify Data Types for designated column or all columns.
E.g. dtype = {"a": Continuous, "b": "Nominal"} or
dtype = {"a": Continuous(), "b": "nominal"}
or dtype = Continuous() or dtype = "Continuous" or dtype = Continuous()
"""
# pylint: disable=too-many-locals, too-many-arguments
col_dtype = detect_dtype(df[x], dtype)
if is_dtype(col_dtype, Nominal()):
# extract the column
df_x = df[x]
# calculate the total rows
nrows = df_x.shape[0]
# cast the column as string type if it contains a mutable type
if df_x.head().apply(lambda x: hasattr(x, "__hash__")).any():
# drop_null() will not work if the column contains a mutable type
df_x = df_x.dropna().astype(str)
# drop null values
df_x = drop_null(df_x)
# calc_word_freq() returns the frequency of words (for the word cloud and word
# frequency bar chart) and the total number of words
word_data = calc_word_freq(df_x, top_words, stopword, lemmatize, stem)
# calc_cat_stats() computes all the categorical stats including the length
# histogram. calc_bar_pie() does the calculations for the bar and pie charts
# NOTE this dictionary could be returned to create_report without
# calling the subsequent compute
cat_data = {
"stats": calc_cat_stats(df_x, nrows, bins),
"bar_pie": calc_bar_pie(df_x, ngroups, largest),
"word_data": word_data,
}
cat_data = dask.compute(cat_data)[0]
return Intermediate(
col=x,
stats=cat_data["stats"],
bar_pie=cat_data["bar_pie"],
word_data=cat_data["word_data"],
visual_type="categorical_column",
)
elif is_dtype(col_dtype, Continuous()):
# calculate the total number of rows then drop the missing values
nrows = df.shape[0]
df_x = drop_null(df[x])
if value_range is not None:
df_x = df_x[df_x.between(*value_range)]
# TODO perhaps we should not use to_dask() on the entire
# initial dataframe and instead only use the column of data
# df_x = df_x.repartition(partition_size="100MB")
# calculate numerical statistics and extract the min and max
num_stats = calc_num_stats(df_x, nrows)
minv, maxv = num_stats["min"], num_stats["max"]
# NOTE this dictionary could be returned to create_report without
# calling the subsequent compute
num_data = {
"hist": da.histogram(df_x, bins=bins, range=[minv, maxv]),
"kde": calc_kde(df_x, bins, minv, maxv),
"box_data": calc_box_new(df_x, num_stats["qntls"]),
"stats": num_stats,
}
num_data = dask.compute(num_data)[0]
return Intermediate(
col=x,
hist=num_data["hist"],
kde=num_data["kde"],
box_data=num_data["box_data"],
stats=num_data["stats"],
visual_type="numerical_column",
)
elif is_dtype(col_dtype, DateTime()):
data_dt: List[Any] = []
# line chart
data_dt.append(dask.delayed(calc_line_dt)(df[[x]], timeunit))
# stats
data_dt.append(dask.delayed(calc_stats_dt)(df[x]))
data, statsdata_dt = dask.compute(*data_dt)
return Intermediate(
col=x, data=data, stats=statsdata_dt, visual_type="datetime_column",
)
else:
raise UnreachableError
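# Illustration (plain pandas, not library code): the value_range filter above is simply
# Series.between applied to the numerical column.
_s_demo = pd.Series([1.0, 5.0, 12.0, 20.0])
_filtered = _s_demo[_s_demo.between(2.0, 15.0)]  # keeps 5.0 and 12.0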
def compute_bivariate(
df: dd.DataFrame,
x: str,
y: str,
bins: int,
ngroups: int,
largest: bool,
nsubgroups: int,
timeunit: str,
agg: str,
sample_size: int,
dtype: Optional[DTypeDef] = None,
) -> Intermediate:
"""
Compute functions for plot(df, x, y)
Parameters
----------
df
Dataframe from which plots are to be generated
x
A valid column name from the dataframe
y
A valid column name from the dataframe
bins
For a histogram or box plot with numerical x axis, it defines
the number of equal-width bins to use when grouping.
ngroups
When grouping over a categorical column, it defines the
number of groups to show in the plot. Ie, the number of
bars to show in a bar chart.
largest
If true, when grouping over a categorical column, the groups
with the largest count will be output. If false, the groups
with the smallest count will be output.
nsubgroups
If x and y are categorical columns, ngroups refers to
how many groups to show from column x, and nsubgroups refers to
how many subgroups to show from column y in each group in column x.
timeunit
Defines the time unit to group values over for a datetime column.
It can be "year", "quarter", "month", "week", "day", "hour",
"minute", "second". With default value "auto", it will use the
time unit such that the resulting number of groups is closest to 15.
agg
Specify the aggregate to use when aggregating over a numeric column
sample_size
Sample size for the scatter plot
dtype: str or DType or dict of str or dict of DType, default None
Specify Data Types for designated column or all columns.
E.g. dtype = {"a": Continuous, "b": "Nominal"} or
dtype = {"a": Continuous(), "b": "nominal"}
or dtype = Continuous() or dtype = "Continuous" or dtype = Continuous()
"""
# pylint: disable=too-many-arguments,too-many-locals
xtype = detect_dtype(df[x], dtype)
ytype = detect_dtype(df[y], dtype)
if (
is_dtype(xtype, Nominal())
and is_dtype(ytype, Continuous())
or is_dtype(xtype, Continuous())
and is_dtype(ytype, Nominal())
):
x, y = (x, y) if is_dtype(xtype, Nominal()) else (y, x)
df = drop_null(df[[x, y]])
df[x] = df[x].apply(str, meta=(x, str))
# box plot per group
boxdata = calc_box(df, bins, ngroups, largest, dtype)
# histogram per group
hisdata = calc_hist_by_group(df, bins, ngroups, largest)
return Intermediate(
x=x, y=y, boxdata=boxdata, histdata=hisdata, visual_type="cat_and_num_cols",
)
elif (
is_dtype(xtype, DateTime())
and is_dtype(ytype, Continuous())
or is_dtype(xtype, Continuous())
and is_dtype(ytype, DateTime())
):
x, y = (x, y) if is_dtype(xtype, DateTime()) else (y, x)
df = drop_null(df[[x, y]])
dtnum: List[Any] = []
# line chart
dtnum.append(dask.delayed(calc_line_dt)(df, timeunit, agg))
# box plot
dtnum.append(dask.delayed(calc_box_dt)(df, timeunit))
dtnum = dask.compute(*dtnum)
return Intermediate(
x=x,
y=y,
linedata=dtnum[0],
boxdata=dtnum[1],
visual_type="dt_and_num_cols",
)
elif (
is_dtype(xtype, DateTime())
and is_dtype(ytype, Nominal())
or is_dtype(xtype, Nominal())
and is_dtype(ytype, DateTime())
):
x, y = (x, y) if is_dtype(xtype, DateTime()) else (y, x)
df = drop_null(df[[x, y]])
df[y] = df[y].apply(str, meta=(y, str))
dtcat: List[Any] = []
# line chart
dtcat.append(
dask.delayed(calc_line_dt)(df, timeunit, ngroups=ngroups, largest=largest)
)
# stacked bar chart
dtcat.append(dask.delayed(calc_stacked_dt)(df, timeunit, ngroups, largest))
dtcat = dask.compute(*dtcat)
return Intermediate(
x=x,
y=y,
linedata=dtcat[0],
stackdata=dtcat[1],
visual_type="dt_and_cat_cols",
)
elif is_dtype(xtype, Nominal()) and is_dtype(ytype, Nominal()):
df = drop_null(df[[x, y]])
df[x] = df[x].apply(str, meta=(x, str))
df[y] = df[y].apply(str, meta=(y, str))
# nested bar chart
nesteddata = calc_nested(df, ngroups, nsubgroups)
# stacked bar chart
stackdata = calc_stacked(df, ngroups, nsubgroups)
# heat map
heatmapdata = calc_heatmap(df, ngroups, nsubgroups)
return Intermediate(
x=x,
y=y,
nesteddata=nesteddata,
stackdata=stackdata,
heatmapdata=heatmapdata,
visual_type="two_cat_cols",
)
elif is_dtype(xtype, Continuous()) and is_dtype(ytype, Continuous()):
df = drop_null(df[[x, y]])
# scatter plot
scatdata = calc_scatter(df, sample_size)
# hexbin plot
hexbindata = df.compute()
# box plot
boxdata = calc_box(df, bins)
return Intermediate(
x=x,
y=y,
scatdata=scatdata,
boxdata=boxdata,
hexbindata=hexbindata,
spl_sz=sample_size,
visual_type="two_num_cols",
)
else:
raise UnreachableError
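# Illustration (a minimal dask sketch, not library code): the cast-to-string pattern used
# above needs an explicit `meta` so dask knows the name and dtype of the resulting column.
_ddf_demo = dd.from_pandas(pd.DataFrame({"g": [1, 2, 2], "v": [0.1, 0.2, 0.3]}), npartitions=1)
_ddf_demo["g"] = _ddf_demo["g"].apply(str, meta=("g", str))
_casted = _ddf_demo.compute()  # "g" is now a string (object) column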
def compute_trivariate(
df: dd.DataFrame,
x: str,
y: str,
z: str,
ngroups: int,
largest: bool,
timeunit: str,
agg: str,
dtype: Optional[DTypeDef] = None,
) -> Intermediate:
"""
Compute functions for plot(df, x, y, z)
Parameters
----------
df
Dataframe from which plots are to be generated
x
A valid column name from the dataframe
y
A valid column name from the dataframe
z
A valid column name from the dataframe
bins
For a histogram or box plot with numerical x axis, it defines
the number of equal-width bins to use when grouping.
ngroups
When grouping over a categorical column, it defines the
number of groups to show in the plot. Ie, the number of
bars to show in a bar chart.
largest
If true, when grouping over a categorical column, the groups
with the largest count will be output. If false, the groups
with the smallest count will be output.
timeunit
Defines the time unit to group values over for a datetime column.
It can be "year", "quarter", "month", "week", "day", "hour",
"minute", "second". With default value "auto", it will use the
time unit such that the resulting number of groups is closest to 15.
agg
Specify the aggregate to use when aggregating over a numeric column
dtype: str or DType or dict of str or dict of DType, default None
Specify Data Types for designated column or all columns.
E.g. dtype = {"a": Continuous, "b": "Nominal"} or
dtype = {"a": Continuous(), "b": "nominal"}
or dtype = Continuous() or dtype = "Continuous" or dtype = Continuous()
"""
# pylint: disable=too-many-arguments
xtype = detect_dtype(df[x], dtype)
ytype = detect_dtype(df[y], dtype)
ztype = detect_dtype(df[z], dtype)
if (
is_dtype(xtype, DateTime())
and is_dtype(ytype, Nominal())
and is_dtype(ztype, Continuous())
):
y, z = z, y
elif (
is_dtype(xtype, Continuous())
and is_dtype(ytype, DateTime())
and is_dtype(ztype, Nominal())
):
x, y = y, x
elif (
is_dtype(xtype, Continuous())
and is_dtype(ytype, Nominal())
and is_dtype(ztype, DateTime())
):
x, y, z = z, x, y
elif (
is_dtype(xtype, Nominal())
and is_dtype(ytype, DateTime())
and is_dtype(ztype, Continuous())
):
x, y, z = y, z, x
elif (
is_dtype(xtype, Nominal())
and is_dtype(ytype, Continuous())
and is_dtype(ztype, DateTime())
):
x, z = z, x
assert (
is_dtype(xtype, DateTime())
and is_dtype(ytype, Continuous())
and is_dtype(ztype, Nominal())
), "x, y, and z must be one each of type datetime, numerical, and categorical"
df = drop_null(df[[x, y, z]])
df[z] = df[z].apply(str, meta=(z, str))
# line chart
data = dask.compute(dask.delayed(calc_line_dt)(df, timeunit, agg, ngroups, largest))
return Intermediate(
x=x, y=y, z=z, agg=agg, data=data[0], visual_type="dt_cat_num_cols",
)
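# Illustration (not library code) of the dask.delayed / dask.compute pattern that the calc_*
# helpers rely on: lazy tasks are built first, then evaluated together in one pass.
_lazy_sum = dask.delayed(lambda s: s.sum())(pd.Series([1, 2, 3]))
(_total,) = dask.compute(_lazy_sum)  # -> 6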
def calc_line_dt(
df: dd.DataFrame,
unit: str,
agg: Optional[str] = None,
ngroups: Optional[int] = None,
largest: Optional[bool] = None,
) -> Union[
Tuple[pd.DataFrame, Dict[str, int], str],
Tuple[pd.DataFrame, str, float],
Tuple[pd.DataFrame, str],
]:
"""
Calculate a line or multiline chart with date on the x axis. If df contains
one datetime column, it will make a line chart of the frequency of values. If
df contains a datetime and categorical column, it will compute the frequency
of each categorical value in each time group. If df contains a datetime and
numerical column, it will compute the aggregate of the numerical column grouped
by the time groups. If df contains a datetime, categorical, and numerical column,
it will compute the aggregate of the numerical column for values in the categorical
column grouped by time.
Parameters
----------
df
A dataframe
unit
The unit of time over which to group the values
agg
Aggregate to use for the numerical column
ngroups
Number of groups for the categorical column
largest
Use the largest or smallest groups in the categorical column
"""
# pylint: disable=too-many-locals
x = df.columns[0] # time column
unit = _get_timeunit(df[x].min(), df[x].max(), 100) if unit == "auto" else unit
if unit not in DTMAP.keys():
raise ValueError
grouper = pd.Grouper(key=x, freq=DTMAP[unit][0]) # for grouping the time values
# multiline charts
if ngroups and largest:
hist_dict: Dict[str, Tuple[np.ndarray, np.ndarray, List[str]]] = dict()
hist_lst: List[Tuple[np.ndarray, np.ndarray, List[str]]] = list()
agg = (
"freq" if agg is None else agg
) # default agg if unspecified for notational concision
# categorical column for grouping over, each resulting group is a line in the chart
grpby_col = df.columns[1] if len(df.columns) == 2 else df.columns[2]
df, grp_cnt_stats, largest_grps = _calc_groups(df, grpby_col, ngroups, largest)
groups = df.groupby([grpby_col])
for grp in largest_grps:
srs = groups.get_group(grp)
# calculate the frequencies or aggregate value in each time group
if len(df.columns) == 3:
dfr = srs.groupby(grouper)[df.columns[1]].agg(agg).reset_index()
else:
dfr = srs[x].to_frame().groupby(grouper).size().reset_index()
dfr.columns = [x, agg]
# if grouping by week, make the label for the week the beginning Sunday
dfr[x] = dfr[x] - pd.to_timedelta(6, unit="d") if unit == "week" else dfr[x]
# format the label
dfr["lbl"] = dfr[x].dt.to_period("S").dt.strftime(DTMAP[unit][1])
hist_lst.append((list(dfr[agg]), list(dfr[x]), list(dfr["lbl"])))
hist_lst = dask.compute(*hist_lst)
for elem in zip(largest_grps, hist_lst):
hist_dict[elem[0]] = elem[1]
return hist_dict, grp_cnt_stats, DTMAP[unit][3]
# single line charts
if agg is None: # frequency of datetime column
miss_pct = round(df[x].isna().sum() / len(df) * 100, 1)
dfr = drop_null(df).groupby(grouper).size().reset_index()
dfr.columns = [x, "freq"]
dfr["pct"] = dfr["freq"] / len(df) * 100
else: # aggregate over a second column
dfr = df.groupby(grouper)[df.columns[1]].agg(agg).reset_index()
dfr.columns = [x, agg]
dfr[x] = dfr[x] - pd.to_timedelta(6, unit="d") if unit == "week" else dfr[x]
dfr["lbl"] = dfr[x].dt.to_period("S").dt.strftime(DTMAP[unit][1])
return (dfr, DTMAP[unit][3], miss_pct) if agg is None else (dfr, DTMAP[unit][3])
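# Illustration (plain pandas, not library code) of the week-label shift in calc_line_dt:
# a "W-SAT" group is keyed by its Saturday end, so subtracting six days moves the label to
# the preceding Sunday, the start of the week.
_week_ends = pd.to_datetime(["2020-01-11", "2020-01-18"])  # Saturdays
_week_starts = _week_ends - pd.to_timedelta(6, unit="d")   # Sundays 2020-01-05 and 2020-01-12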
def calc_box_dt(
df: dd.DataFrame, unit: str
) -> Tuple[pd.DataFrame, List[str], List[float], str]:
"""
Calculate a box plot with date on the x axis.
Parameters
----------
df
A dataframe with one datetime and one numerical column
unit
The unit of time over which to group the values
"""
x, y = df.columns[0], df.columns[1] # time column
unit = _get_timeunit(df[x].min(), df[x].max(), 10) if unit == "auto" else unit
if unit not in DTMAP.keys():
raise ValueError
grps = df.groupby(pd.Grouper(key=x, freq=DTMAP[unit][0])) # time groups
# box plot for the values in each time group
df = pd.concat([_calc_box_stats(g[1][y], g[0], True) for g in grps], axis=1,)
df = df.append(pd.Series({c: i + 1 for i, c in enumerate(df.columns)}, name="x",)).T
# If grouping by week, make the label for the week the beginning Sunday
df.index = df.index - pd.to_timedelta(6, unit="d") if unit == "week" else df.index
df.index.name = "grp"
df = df.reset_index()
df["grp"] = df["grp"].dt.to_period("S").dt.strftime(DTMAP[unit][2])
df["x0"], df["x1"] = df["x"] - 0.8, df["x"] - 0.2 # width of whiskers for plotting
outx, outy = _calc_box_otlrs(df)
return df, outx, outy, DTMAP[unit][3]
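# Illustrative usage sketch (not from the original module): `ddf` is an assumed
# Dask DataFrame whose first column is datetime and second is numerical.
#
#     boxes_df, outlier_x, outlier_y, time_label = calc_box_dt(ddf, unit="auto")
#
# "auto" lets _get_timeunit() pick a granularity giving roughly 10 time groups;
# the outlier lists feed the scatter overlay drawn on top of the boxes.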
def calc_stacked_dt(
df: dd.DataFrame, unit: str, ngroups: int, largest: bool,
) -> Tuple[pd.DataFrame, Dict[str, int], str]:
"""
Calculate a stacked bar chart with date on the x axis
Parameters
----------
df
A dataframe with one datetime and one categorical column
unit
The unit of time over which to group the values
ngroups
Number of groups for the categorical column
largest
Use the largest or smallest groups in the categorical column
"""
# pylint: disable=too-many-locals
x, y = df.columns[0], df.columns[1] # time column
unit = _get_timeunit(df[x].min(), df[x].max(), 10) if unit == "auto" else unit
if unit not in DTMAP.keys():
        raise ValueError(f"Unsupported time unit: {unit}")
# get the largest groups
df_grps, grp_cnt_stats, _ = _calc_groups(df, y, ngroups, largest)
grouper = (pd.Grouper(key=x, freq=DTMAP[unit][0]),) # time grouper
# pivot table of counts with date groups as index and categorical values as column names
dfr = pd.pivot_table(
df_grps, index=grouper, columns=y, aggfunc=len, fill_value=0,
).rename_axis(None)
# if more than ngroups categorical values, aggregate the smallest groups into "Others"
if grp_cnt_stats[f"{y}_ttl"] > grp_cnt_stats[f"{y}_shw"]:
grp_cnts = df.groupby(pd.Grouper(key=x, freq=DTMAP[unit][0])).size()
dfr["Others"] = grp_cnts - dfr.sum(axis=1)
dfr.index = ( # If grouping by week, make the label for the week the beginning Sunday
dfr.index - pd.to_timedelta(6, unit="d") if unit == "week" else dfr.index
)
dfr.index = dfr.index.to_period("S").strftime(DTMAP[unit][2]) # format labels
return dfr, grp_cnt_stats, DTMAP[unit][3]
def calc_bar(
srs: dd.Series, ngroups: int, largest: bool
) -> Tuple[dd.DataFrame, dd.core.Scalar, dd.core.Scalar]:
"""
Calculates the counts of categorical values, the total number of
categorical values, and the number of non-null cells required
for a bar chart in plot(df).
Parameters
----------
srs
One categorical column
ngroups
Number of groups to return
largest
If true, show the groups with the largest count,
else show the groups with the smallest count
"""
# drop null values
srs_present = drop_null(srs)
# number of present (not null) values
npresent = srs_present.shape[0]
# counts of unique values in the series
grps = srs_present.value_counts(sort=False)
# total number of groups
ttl_grps = grps.shape[0]
# select the largest or smallest groups
fnl_grp_cnts = grps.nlargest(ngroups) if largest else grps.nsmallest(ngroups)
return fnl_grp_cnts.to_frame(), ttl_grps, npresent
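# Illustrative usage sketch (column name "city" is an assumption): the returned
# objects are lazy Dask results and still need a dask.compute() call.
#
#     counts, ttl_grps, npresent = calc_bar(ddf["city"], ngroups=10, largest=True)
#     counts, ttl_grps, npresent = dask.compute(counts, ttl_grps, npresent)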
def calc_bar_pie(
srs: dd.Series, ngroups: int, largest: bool
) -> Tuple[dd.DataFrame, dd.core.Scalar]:
"""
Calculates the counts of categorical values and the total number of
categorical values required for the bar and pie charts in plot(df, x).
Parameters
----------
srs
One categorical column
ngroups
Number of groups to return
largest
If true, show the groups with the largest count,
else show the groups with the smallest count
"""
# counts of unique values in the series
grps = srs.value_counts(sort=False)
# total number of groups
ttl_grps = grps.shape[0]
# select the largest or smallest groups
fnl_grp_cnts = grps.nlargest(ngroups) if largest else grps.nsmallest(ngroups)
return fnl_grp_cnts.to_frame(), ttl_grps
def calc_word_freq(
srs: dd.Series,
top_words: int = 30,
stopword: bool = True,
lemmatize: bool = False,
stem: bool = False,
) -> Tuple[dd.Series, dd.core.Scalar]:
"""
Parse a categorical column of text data into words, and then
compute the frequency distribution of words and the total
number of words.
Parameters
----------
srs
One categorical column
top_words
Number of highest frequency words to show in the
wordcloud and word frequency bar chart
stopword
If True, remove stop words, else keep them
lemmatize
If True, lemmatize the words before computing
the word frequencies, else don't
stem
If True, extract the stem of the words before
computing the word frequencies, else don't
"""
# pylint: disable=unnecessary-lambda
if stopword:
# use a regex to replace stop words with empty string
srs = srs.str.replace(r"\b(?:{})\b".format("|".join(english_stopwords)), "")
# replace all non-alphanumeric characters with an empty string, and convert to lowercase
srs = srs.str.replace(r"[^\w+ ]", "").str.lower()
# split each string on whitespace into words then apply "explode()" to "stack" all
# the words into a series
# NOTE this is slow. One possibly better solution: after .split(), count the words
# immediately rather than create a new series with .explode() and apply
# .value_counts()
srs = srs.str.split().explode()
# lemmatize and stem
if lemmatize or stem:
srs = srs.dropna()
if lemmatize:
lem = WordNetLemmatizer()
srs = srs.apply(lambda x: lem.lemmatize(x), meta=(srs.name, "object"))
if stem:
porter = PorterStemmer()
srs = srs.apply(lambda x: porter.stem(x), meta=(srs.name, "object"))
# counts of words, excludes null values
word_cnts = srs.value_counts(sort=False)
# total number of words
nwords = word_cnts.sum()
# words with the highest frequency
fnl_word_cnts = word_cnts.nlargest(n=top_words)
return fnl_word_cnts, nwords
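# Illustrative usage sketch (column name "review" is an assumption): lemmatize
# and stem need the NLTK WordNet/Porter resources installed.
#
#     word_cnts, nwords = calc_word_freq(ddf["review"], top_words=30, stopword=True)
#     word_cnts, nwords = dask.compute(word_cnts, nwords)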
def calc_kde(
srs: dd.Series, bins: int, minv: float, maxv: float,
) -> Tuple[Tuple[da.core.Array, da.core.Array], np.ndarray]:
"""
Calculate a density histogram and its corresponding kernel density
estimate over a given series. The kernel is Gaussian.
Parameters
----------
data
One numerical column over which to compute the histogram and kde
bins
Number of bins to use in the histogram
"""
# compute the density histogram
hist = da.histogram(srs, bins=bins, range=[minv, maxv], density=True)
# probability density function for the series
# NOTE gaussian_kde triggers a .compute()
try:
kde = gaussian_kde(
srs.map_partitions(lambda x: x.sample(min(1000, x.shape[0])), meta=srs)
)
except np.linalg.LinAlgError:
kde = None
return hist, kde
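# Illustrative usage sketch (column name "price" is an assumption): the min/max
# are computed up front and shared with the histogram call.
#
#     minv, maxv = dask.compute(ddf["price"].min(), ddf["price"].max())
#     (hist_cnts, hist_edges), kde = calc_kde(ddf["price"], bins=50, minv=minv, maxv=maxv)
#
# `kde` comes back as None when gaussian_kde hits a LinAlgError on degenerate data.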
def calc_box_new(srs: dd.Series, qntls: dd.Series) -> Dict[str, Any]:
"""
Calculate the data required for a box plot
Parameters
----------
srs
One numerical column from which to compute the box plot data
qntls
Quantiles from the normal Q-Q plot
"""
# box plot stats
# inter-quartile range
# TODO figure out how to extract a scalar from a Dask series without using a function like sum()
qrtl1 = qntls.loc[0.25].sum()
qrtl3 = qntls.loc[0.75].sum()
iqr = qrtl3 - qrtl1
srs_iqr = srs[srs.between(qrtl1 - 1.5 * iqr, qrtl3 + 1.5 * iqr)]
# outliers
otlrs = srs[~srs.between(qrtl1 - 1.5 * iqr, qrtl3 + 1.5 * iqr)]
# randomly sample at most 100 outliers from each partition without replacement
otlrs = otlrs.map_partitions(lambda x: x.sample(min(100, x.shape[0])), meta=otlrs)
box_data = {
"grp": srs.name,
"q1": qrtl1,
"q2": qntls.loc[0.5].sum(),
"q3": qrtl3,
"lw": srs_iqr.min(),
"uw": srs_iqr.max(),
"otlrs": otlrs.values,
"x": 1, # x, x0, and x1 are for plotting the box plot with bokeh
"x0": 0.2,
"x1": 0.8,
}
return box_data
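# Illustrative usage sketch: `qntls` is assumed to come from calc_num_stats(),
# i.e. srs.quantile(np.linspace(0.01, 0.99, 99)), so .loc[0.25/0.5/0.75] exist.
#
#     box_data = calc_box_new(drop_null(ddf["price"]), qntls)
#     box_data = dask.compute(box_data)[0]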
def calc_stats(
df: dd.DataFrame, dtype_cnts: Dict[str, int]
) -> Dict[str, Union[int, dd.core.Scalar, Dict[str, int]]]:
"""
Calculate the statistics for plot(df) from a DataFrame
Parameters
----------
df
a DataFrame
dtype_cnts
a dictionary that contains the count for each type
"""
stats = {
"nrows": df.shape[0],
"ncols": df.shape[1],
"npresent_cells": df.count().sum(),
"nrows_wo_dups": df.drop_duplicates().shape[0],
"mem_use": df.memory_usage(deep=True).sum(),
"dtype_cnts": dtype_cnts,
}
return stats
def calc_num_stats(srs: dd.Series, nrows: dd.core.Scalar,) -> Dict[str, Any]:
"""
Calculate statistics for a numerical column
Parameters
----------
srs
a numerical column
nrows
number of rows in the column before dropping null values
"""
stats = {
"nrows": nrows,
"npresent": srs.shape[0],
"nunique": srs.nunique(),
"ninfinite": ((srs == np.inf) | (srs == -np.inf)).sum(),
"nzero": (srs == 0).sum(),
"min": srs.min(),
"max": srs.max(),
"qntls": srs.quantile(np.linspace(0.01, 0.99, 99)),
"mean": srs.mean(),
"std": srs.std(),
"skew": skew(srs),
"kurt": kurtosis(srs),
"mem_use": srs.memory_usage(),
}
return stats
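# Illustrative usage sketch (column name "price" is an assumption): the dict
# values are Dask scalars/series, so compute them together in one pass.
#
#     num_stats = calc_num_stats(drop_null(ddf["price"]), nrows=ddf.shape[0])
#     num_stats = dask.compute(num_stats)[0]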
def calc_cat_stats(
srs: dd.Series, nrows: int, bins: int,
) -> Tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any]]:
"""
Calculate stats for a categorical column
Parameters
----------
srs
a categorical column
nrows
number of rows before dropping null values
bins
number of bins for the category length frequency histogram
"""
# overview stats
stats = {
"nrows": nrows,
"npresent": srs.shape[0],
"nunique": srs.nunique(),
"mem_use": srs.memory_usage(),
"first_rows": srs.loc[:4],
}
# length stats
lengths = srs.str.len()
minv, maxv = lengths.min(), lengths.max()
hist = da.histogram(lengths.values, bins=bins, range=[minv, maxv])
length_stats = {
"Mean": lengths.mean(),
"Median": lengths.quantile(0.5),
"Minimum": minv,
"Maximum": maxv,
"hist": hist,
}
# letter stats
letter_stats = {
"Count": srs.str.count(r"[a-zA-Z]").sum(),
"Lowercase Letter": srs.str.count(r"[a-z]").sum(),
"Space Separator": srs.str.count(r"[ ]").sum(),
"Uppercase Letter": srs.str.count(r"[A-Z]").sum(),
"Dash Punctuation": srs.str.count(r"[-]").sum(),
"Decimal Number": srs.str.count(r"[0-9]").sum(),
}
return stats, length_stats, letter_stats
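# Illustrative usage sketch (column name "city" is an assumption; the column is
# cast to str so the .str accessors work):
#
#     overview, length_stats, letter_stats = calc_cat_stats(
#         drop_null(ddf["city"]).astype(str), nrows=ddf.shape[0], bins=20
#     )
#     overview, length_stats, letter_stats = dask.compute(overview, length_stats, letter_stats)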
def calc_box(
df: dd.DataFrame,
bins: int,
ngroups: int = 10,
largest: bool = True,
dtype: Optional[DTypeDef] = None,
) -> Tuple[pd.DataFrame, List[str], List[float], Optional[Dict[str, int]]]:
"""
Compute a box plot over either
1) the values in one column
2) the values corresponding to groups in another column
3) the values corresponding to binning another column
Parameters
----------
df
Dataframe with one or two columns
bins
Number of bins to use if df has two numerical columns
ngroups
Number of groups to show if df has a categorical and numerical column
largest
When calculating a box plot per group, select the largest or smallest groups
dtype: str or DType or dict of str or dict of DType, default None
Specify Data Types for designated column or all columns.
E.g. dtype = {"a": Continuous, "b": "Nominal"} or
dtype = {"a": Continuous(), "b": "nominal"}
or dtype = Continuous() or dtype = "Continuous" or dtype = Continuous()
Returns
-------
Tuple[pd.DataFrame, List[str], List[float], Dict[str, int]]
The box plot statistics in a dataframe, a list of the outlier
groups and another list of the outlier values, a dictionary
logging the sampled group output
"""
# pylint: disable=too-many-locals
grp_cnt_stats = None # to inform the user of sampled output
x = df.columns[0]
if len(df.columns) == 1:
df = _calc_box_stats(df[x], x)
else:
y = df.columns[1]
if is_dtype(detect_dtype(df[x], dtype), Continuous()) and is_dtype(
detect_dtype(df[y], dtype), Continuous()
):
minv, maxv, cnt = dask.compute(df[x].min(), df[x].max(), df[x].nunique())
bins = cnt if cnt < bins else bins
endpts = np.linspace(minv, maxv, num=bins + 1)
# calculate a box plot over each bin
df = dd.concat(
[
_calc_box_stats(
df[(df[x] >= endpts[i]) & (df[x] < endpts[i + 1])][y],
f"[{endpts[i]},{endpts[i + 1]})",
)
if i != len(endpts) - 2
else _calc_box_stats(
df[(df[x] >= endpts[i]) & (df[x] <= endpts[i + 1])][y],
f"[{endpts[i]},{endpts[i + 1]}]",
)
for i in range(len(endpts) - 1)
],
axis=1,
).compute()
endpts_df = pd.DataFrame(
[endpts[:-1], endpts[1:]], ["lb", "ub"], df.columns
)
df = pd.concat([df, endpts_df], axis=0)
else:
df, grp_cnt_stats, largest_grps = _calc_groups(df, x, ngroups, largest)
# calculate a box plot over each group
df = dd.concat(
[_calc_box_stats(df[df[x] == grp][y], grp) for grp in largest_grps],
axis=1,
).compute()
df = df.append(pd.Series({c: i + 1 for i, c in enumerate(df.columns)}, name="x",)).T
df.index.name = "grp"
df = df.reset_index()
df["x0"], df["x1"] = df["x"] - 0.8, df["x"] - 0.2 # width of whiskers for plotting
outx, outy = _calc_box_otlrs(df)
return df, outx, outy, grp_cnt_stats
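# Illustrative usage sketch (column names are assumptions): with a categorical
# "city" and numerical "price" column this takes the group-wise branch above,
# while two numerical columns would take the binning branch instead.
#
#     boxes_df, outx, outy, grp_stats = calc_box(ddf[["city", "price"]], bins=50, ngroups=10)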
def calc_hist_by_group(
df: dd.DataFrame, bins: int, ngroups: int, largest: bool
) -> Tuple[pd.DataFrame, Dict[str, int]]:
"""
Compute a histogram over the values corresponding to the groups in another column
Parameters
----------
df
Dataframe with one categorical and one numerical column
bins
Number of bins to use in the histogram
ngroups
Number of groups to show from the categorical column
largest
Select the largest or smallest groups
Returns
-------
Tuple[pd.DataFrame, Dict[str, int]]
The histograms in a dataframe and a dictionary
logging the sampled group output
"""
# pylint: disable=too-many-locals
hist_dict: Dict[str, Tuple[np.ndarray, np.ndarray, List[str]]] = dict()
hist_lst: List[Tuple[np.ndarray, np.ndarray, List[str]]] = list()
df, grp_cnt_stats, largest_grps = _calc_groups(df, df.columns[0], ngroups, largest)
# create a histogram for each group
groups = df.groupby([df.columns[0]])
minv, maxv = dask.compute(df[df.columns[1]].min(), df[df.columns[1]].max())
for grp in largest_grps:
grp_srs = groups.get_group(grp)[df.columns[1]]
hist_arr, bins_arr = da.histogram(grp_srs, range=[minv, maxv], bins=bins)
intervals = _format_bin_intervals(bins_arr)
hist_lst.append((hist_arr, bins_arr, intervals))
hist_lst = dask.compute(*hist_lst)
for elem in zip(largest_grps, hist_lst):
hist_dict[elem[0]] = elem[1]
return hist_dict, grp_cnt_stats
def calc_scatter(df: dd.DataFrame, sample_size: int) -> pd.DataFrame:
"""
Extracts the points to use in a scatter plot
Parameters
----------
df
Dataframe with two numerical columns
sample_size
the number of points to randomly sample in the scatter plot
Returns
-------
pd.DataFrame
A dataframe containing the scatter points
"""
if len(df) > sample_size:
df = df.sample(frac=sample_size / len(df))
return df.compute()
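# Illustrative usage sketch (column names are assumptions): at most ~1000 points
# are kept so the browser-side scatter plot stays responsive.
#
#     points = calc_scatter(ddf[["height", "weight"]].dropna(), sample_size=1000)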
def calc_nested(
df: dd.DataFrame, ngroups: int, nsubgroups: int,
) -> Tuple[pd.DataFrame, Dict[str, int]]:
"""
Calculate a nested bar chart of the counts of two columns
Parameters
----------
df
Dataframe with two categorical columns
ngroups
Number of groups to show from the first column
nsubgroups
Number of subgroups (from the second column) to show in each group
Returns
-------
Tuple[pd.DataFrame, Dict[str, int]]
The bar chart counts in a dataframe and a dictionary
logging the sampled group output
"""
x, y = df.columns[0], df.columns[1]
df, grp_cnt_stats, _ = _calc_groups(df, x, ngroups)
df2 = df.groupby([x, y]).size().reset_index()
max_subcol_cnt = df2.groupby(x).size().max().compute()
df2.columns = [x, y, "cnt"]
df_res = (
df2.groupby(x)[[y, "cnt"]]
.apply(
lambda x: x.nlargest(n=nsubgroups, columns="cnt"),
meta=({y: "f8", "cnt": "i8"}),
)
.reset_index()
.compute()
)
df_res["grp_names"] = list(zip(df_res[x], df_res[y]))
df_res = df_res.drop([x, "level_1", y], axis=1)
grp_cnt_stats[f"{y}_ttl"] = max_subcol_cnt
grp_cnt_stats[f"{y}_shw"] = min(max_subcol_cnt, nsubgroups)
return df_res, grp_cnt_stats
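# Illustrative usage sketch (column names are assumptions): the 10 largest
# countries each show their 5 largest cities as nested bars.
#
#     nested_df, grp_stats = calc_nested(ddf[["country", "city"]], ngroups=10, nsubgroups=5)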
def calc_stacked(
df: dd.DataFrame, ngroups: int, nsubgroups: int,
) -> Tuple[pd.DataFrame, Dict[str, int]]:
"""
Calculate a stacked bar chart of the counts of two columns
Parameters
----------
df
two categorical columns
ngroups
number of groups to show from the first column
nsubgroups
number of subgroups (from the second column) to show in each group
Returns
-------
Tuple[pd.DataFrame, Dict[str, int]]
The bar chart counts in a dataframe and a dictionary
logging the sampled group output
"""
x, y = df.columns[0], df.columns[1]
df, grp_cnt_stats, largest_grps = _calc_groups(df, x, ngroups)
    fin_df = pd.DataFrame()
import os
import sys
sys.path.append('../')
from load_paths import load_box_paths
from processing_helpers import *
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
import seaborn as sns
import numpy as np
import matplotlib.dates as mdates
import datetime
#sns.set(color_codes=True)
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
import statistics as st
sns.set_style('whitegrid', {'axes.linewidth' : 0.5})
from statsmodels.distributions.empirical_distribution import ECDF
import scipy
import gc
import re
today = dt.datetime.today()
#datetoday = date(today.year, today.month, today.day)
datapath, projectpath, wdir, exe_dir, git_dir = load_box_paths()
def load_sim_data(exp_name, input_wdir=None, input_sim_output_path=None, column_list=None):
input_wdir = input_wdir or wdir
sim_output_path_base = os.path.join(input_wdir, 'simulation_output', exp_name)
sim_output_path = input_sim_output_path or sim_output_path_base
df = pd.read_csv(os.path.join(sim_output_path, 'trajectoriesDat.csv'), usecols=column_list)
return df
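# Illustrative usage sketch: the experiment name below is made up; it only needs
# a matching folder under <wdir>/simulation_output/ containing trajectoriesDat.csv.
#
#     trajectories = load_sim_data('20201118_IL_example_run', column_list=column_list)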
first_date = dt.datetime(day=6,month=9,year=2020)
column_list = ['scen_num', 'run_num', 'campus_quarantine_pop', 'campus_isolation_pop', 'detection_rate_official'] #'reopening_multiplier_4'
def get_probs(exp_name):
trajectories = load_sim_data(exp_name, column_list=column_list) #pd.read_csv('trajectoriesDat_200814_1.csv', usecols=column_list)
#filedate = get_latest_filedate()
qi_path=os.path.join(datapath, 'covid_modeling_northwestern', '201118_QI_tracking.csv')
qi = pd.read_csv(qi_path)
tests = pd.read_csv(os.path.join(datapath, 'covid_modeling_northwestern', 'Depersonalized_Test_Result.csv'))
idx1 = pd.date_range(first_date, pd.to_datetime(np.max(tests['ORDER_DATE'])))
tests['result'] = [str(d).lower() for d in tests['RESULT'].values]
tests_campus_pos = tests[(tests['UNDERGRAD_FLAG'] == 'Undergrad') & (tests['result'] == 'detected')]
positive_daily = tests_campus_pos.groupby('ORDER_DATE').agg({'ORDER_ID': pd.Series.nunique})
    positive_daily['specimen_date'] = pd.to_datetime(positive_daily.index)
'''
Preprocessing Tranformers Based on sci-kit's API
By <NAME>
Created on June 12, 2017
'''
import copy
import pandas as pd
import numpy as np
import transforms3d as t3d
import scipy.ndimage.filters as filters
from sklearn.base import BaseEstimator, TransformerMixin
from analysis.pymo.rotation_tools import Rotation, euler2expmap, euler2expmap2, expmap2euler, euler_reorder, unroll
from analysis.pymo.Quaternions import Quaternions
from analysis.pymo.Pivots import Pivots
class MocapParameterizer(BaseEstimator, TransformerMixin):
def __init__(self, param_type = 'euler'):
'''
param_type = {'euler', 'quat', 'expmap', 'position', 'expmap2pos'}
'''
self.param_type = param_type
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
print("MocapParameterizer: " + self.param_type)
if self.param_type == 'euler':
return X
elif self.param_type == 'expmap':
return self._to_expmap(X)
elif self.param_type == 'quat':
return X
elif self.param_type == 'position':
return self._to_pos(X)
elif self.param_type == 'expmap2pos':
return self._expmap_to_pos(X)
else:
            raise ValueError('param_type must be one of: euler, quat, expmap, position, expmap2pos')
# return X
def inverse_transform(self, X, copy=None):
if self.param_type == 'euler':
return X
elif self.param_type == 'expmap':
return self._expmap_to_euler(X)
elif self.param_type == 'quat':
            raise NotImplementedError('quat2euler is not supported')
elif self.param_type == 'position':
# raise 'positions 2 eulers is not supported'
print('positions 2 eulers is not supported')
return X
else:
            raise ValueError('param_type must be one of: euler, quat, expmap, position')
def _to_pos(self, X):
'''Converts joints rotations in Euler angles to joint positions'''
Q = []
for track in X:
channels = []
titles = []
euler_df = track.values
# Create a new DataFrame to store the exponential map rep
pos_df = pd.DataFrame(index=euler_df.index)
# Copy the root rotations into the new DataFrame
# rxp = '%s_Xrotation'%track.root_name
# ryp = '%s_Yrotation'%track.root_name
# rzp = '%s_Zrotation'%track.root_name
# pos_df[rxp] = pd.Series(data=euler_df[rxp], index=pos_df.index)
# pos_df[ryp] = pd.Series(data=euler_df[ryp], index=pos_df.index)
# pos_df[rzp] = pd.Series(data=euler_df[rzp], index=pos_df.index)
# List the columns that contain rotation channels
rot_cols = [c for c in euler_df.columns if ('rotation' in c)]
# List the columns that contain position channels
pos_cols = [c for c in euler_df.columns if ('position' in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton)
tree_data = {}
for joint in track.traverse():
parent = track.skeleton[joint]['parent']
rot_order = track.skeleton[joint]['order']
#print("rot_order:" + joint + " :" + rot_order)
# Get the rotation columns that belong to this joint
rc = euler_df[[c for c in rot_cols if joint in c]]
# Get the position columns that belong to this joint
pc = euler_df[[c for c in pos_cols if joint in c]]
# Make sure the columns are organized in xyz order
if rc.shape[1] < 3:
euler_values = np.zeros((euler_df.shape[0], 3))
rot_order = "XYZ"
else:
euler_values = np.pi/180.0*np.transpose(np.array([track.values['%s_%srotation'%(joint, rot_order[0])], track.values['%s_%srotation'%(joint, rot_order[1])], track.values['%s_%srotation'%(joint, rot_order[2])]]))
if pc.shape[1] < 3:
pos_values = np.asarray([[0,0,0] for f in pc.iterrows()])
else:
pos_values =np.asarray([[f[1]['%s_Xposition'%joint],
f[1]['%s_Yposition'%joint],
f[1]['%s_Zposition'%joint]] for f in pc.iterrows()])
quats = Quaternions.from_euler(np.asarray(euler_values), order=rot_order.lower(), world=False)
tree_data[joint]=[
[], # to store the rotation matrix
[] # to store the calculated position
]
if track.root_name == joint:
tree_data[joint][0] = quats#rotmats
# tree_data[joint][1] = np.add(pos_values, track.skeleton[joint]['offsets'])
tree_data[joint][1] = pos_values
else:
# for every frame i, multiply this joint's rotmat to the rotmat of its parent
tree_data[joint][0] = tree_data[parent][0]*quats# np.matmul(rotmats, tree_data[parent][0])
# add the position channel to the offset and store it in k, for every frame i
k = pos_values + np.asarray(track.skeleton[joint]['offsets'])
# multiply k to the rotmat of the parent for every frame i
q = tree_data[parent][0]*k #np.matmul(k.reshape(k.shape[0],1,3), tree_data[parent][0])
# add q to the position of the parent, for every frame i
tree_data[joint][1] = tree_data[parent][1] + q #q.reshape(k.shape[0],3) + tree_data[parent][1]
# Create the corresponding columns in the new DataFrame
pos_df['%s_Xposition'%joint] = pd.Series(data=[e[0] for e in tree_data[joint][1]], index=pos_df.index)
pos_df['%s_Yposition'%joint] = pd.Series(data=[e[1] for e in tree_data[joint][1]], index=pos_df.index)
pos_df['%s_Zposition'%joint] = pd.Series(data=[e[2] for e in tree_data[joint][1]], index=pos_df.index)
new_track = track.clone()
new_track.values = pos_df
Q.append(new_track)
return Q
def _expmap2rot(self, expmap):
theta = np.linalg.norm(expmap, axis=1, keepdims=True)
nz = np.nonzero(theta)[0]
expmap[nz,:] = expmap[nz,:]/theta[nz]
nrows=expmap.shape[0]
x = expmap[:,0]
y = expmap[:,1]
z = expmap[:,2]
s = np.sin(theta*0.5).reshape(nrows)
c = np.cos(theta*0.5).reshape(nrows)
rotmats = np.zeros((nrows, 3, 3))
rotmats[:,0,0] = 2*(x*x-1)*s*s+1
rotmats[:,0,1] = 2*x*y*s*s-2*z*c*s
rotmats[:,0,2] = 2*x*z*s*s+2*y*c*s
rotmats[:,1,0] = 2*x*y*s*s+2*z*c*s
rotmats[:,1,1] = 2*(y*y-1)*s*s+1
rotmats[:,1,2] = 2*y*z*s*s-2*x*c*s
rotmats[:,2,0] = 2*x*z*s*s-2*y*c*s
rotmats[:,2,1] = 2*y*z*s*s+2*x*c*s
rotmats[:,2,2] = 2*(z*z-1)*s*s+1
return rotmats
def _expmap_to_pos(self, X):
'''Converts joints rotations in expmap notation to joint positions'''
Q = []
for track in X:
channels = []
titles = []
exp_df = track.values
# Create a new DataFrame to store the exponential map rep
pos_df = pd.DataFrame(index=exp_df.index)
# Copy the root rotations into the new DataFrame
# rxp = '%s_Xrotation'%track.root_name
# ryp = '%s_Yrotation'%track.root_name
# rzp = '%s_Zrotation'%track.root_name
# pos_df[rxp] = pd.Series(data=euler_df[rxp], index=pos_df.index)
# pos_df[ryp] = pd.Series(data=euler_df[ryp], index=pos_df.index)
# pos_df[rzp] = pd.Series(data=euler_df[rzp], index=pos_df.index)
# List the columns that contain rotation channels
exp_params = [c for c in exp_df.columns if ( any(p in c for p in ['alpha', 'beta','gamma']) and 'Nub' not in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton)
tree_data = {}
for joint in track.traverse():
parent = track.skeleton[joint]['parent']
if 'Nub' not in joint:
r = exp_df[[c for c in exp_params if joint in c]] # Get the columns that belong to this joint
expmap = r.values
#expmap = [[f[1]['%s_alpha'%joint], f[1]['%s_beta'%joint], f[1]['%s_gamma'%joint]] for f in r.iterrows()]
else:
expmap = np.zeros((exp_df.shape[0], 3))
# Convert the eulers to rotation matrices
#rotmats = np.asarray([Rotation(f, 'expmap').rotmat for f in expmap])
#angs = np.linalg.norm(expmap,axis=1, keepdims=True)
rotmats = self._expmap2rot(expmap)
tree_data[joint]=[
[], # to store the rotation matrix
[] # to store the calculated position
]
pos_values = np.zeros((exp_df.shape[0], 3))
if track.root_name == joint:
tree_data[joint][0] = rotmats
# tree_data[joint][1] = np.add(pos_values, track.skeleton[joint]['offsets'])
tree_data[joint][1] = pos_values
else:
# for every frame i, multiply this joint's rotmat to the rotmat of its parent
tree_data[joint][0] = np.matmul(rotmats, tree_data[parent][0])
# add the position channel to the offset and store it in k, for every frame i
k = pos_values + track.skeleton[joint]['offsets']
# multiply k to the rotmat of the parent for every frame i
q = np.matmul(k.reshape(k.shape[0],1,3), tree_data[parent][0])
# add q to the position of the parent, for every frame i
tree_data[joint][1] = q.reshape(k.shape[0],3) + tree_data[parent][1]
# Create the corresponding columns in the new DataFrame
pos_df['%s_Xposition'%joint] = pd.Series(data=tree_data[joint][1][:,0], index=pos_df.index)
pos_df['%s_Yposition'%joint] = pd.Series(data=tree_data[joint][1][:,1], index=pos_df.index)
pos_df['%s_Zposition'%joint] = pd.Series(data=tree_data[joint][1][:,2], index=pos_df.index)
new_track = track.clone()
new_track.values = pos_df
Q.append(new_track)
return Q
def _to_expmap(self, X):
'''Converts Euler angles to Exponential Maps'''
Q = []
for track in X:
channels = []
titles = []
euler_df = track.values
# Create a new DataFrame to store the exponential map rep
exp_df = euler_df.copy()# pd.DataFrame(index=euler_df.index)
# Copy the root positions into the new DataFrame
#rxp = '%s_Xposition'%track.root_name
#ryp = '%s_Yposition'%track.root_name
#rzp = '%s_Zposition'%track.root_name
#exp_df[rxp] = pd.Series(data=euler_df[rxp], index=exp_df.index)
#exp_df[ryp] = pd.Series(data=euler_df[ryp], index=exp_df.index)
#exp_df[rzp] = pd.Series(data=euler_df[rzp], index=exp_df.index)
# List the columns that contain rotation channels
rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton if 'Nub' not in joint)
for joint in joints:
#print(joint)
r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint
rot_order = track.skeleton[joint]['order']
r1_col = '%s_%srotation'%(joint, rot_order[0])
r2_col = '%s_%srotation'%(joint, rot_order[1])
r3_col = '%s_%srotation'%(joint, rot_order[2])
exp_df.drop([r1_col, r2_col, r3_col], axis=1, inplace=True)
euler = [[f[1][r1_col], f[1][r2_col], f[1][r3_col]] for f in r.iterrows()]
#exps = [Rotation(f, 'euler', from_deg=True, order=rot_order).to_expmap() for f in euler] # Convert the eulers to exp maps
exps = unroll(np.array([euler2expmap(f, rot_order, True) for f in euler])) # Convert the exp maps to eulers
# exps = np.array([euler2expmap(f, rot_order, True) for f in euler]) # Convert the exp maps to eulers
#exps = euler2expmap2(euler, rot_order, True) # Convert the eulers to exp maps
# Create the corresponding columns in the new DataFrame
exp_df.insert(loc=0, column='%s_gamma'%joint, value=pd.Series(data=[e[2] for e in exps], index=exp_df.index))
exp_df.insert(loc=0, column='%s_beta'%joint, value=pd.Series(data=[e[1] for e in exps], index=exp_df.index))
exp_df.insert(loc=0, column='%s_alpha'%joint, value=pd.Series(data=[e[0] for e in exps], index=exp_df.index))
#print(exp_df.columns)
new_track = track.clone()
new_track.values = exp_df
Q.append(new_track)
return Q
def _expmap_to_euler(self, X):
Q = []
for track in X:
channels = []
titles = []
exp_df = track.values
# Create a new DataFrame to store the exponential map rep
#euler_df = pd.DataFrame(index=exp_df.index)
euler_df = exp_df.copy()
# Copy the root positions into the new DataFrame
#rxp = '%s_Xposition'%track.root_name
#ryp = '%s_Yposition'%track.root_name
#rzp = '%s_Zposition'%track.root_name
#euler_df[rxp] = pd.Series(data=exp_df[rxp], index=euler_df.index)
#euler_df[ryp] = pd.Series(data=exp_df[ryp], index=euler_df.index)
#euler_df[rzp] = pd.Series(data=exp_df[rzp], index=euler_df.index)
# List the columns that contain rotation channels
exp_params = [c for c in exp_df.columns if ( any(p in c for p in ['alpha', 'beta','gamma']) and 'Nub' not in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton if 'Nub' not in joint)
for joint in joints:
r = exp_df[[c for c in exp_params if joint in c]] # Get the columns that belong to this joint
euler_df.drop(['%s_alpha'%joint, '%s_beta'%joint, '%s_gamma'%joint], axis=1, inplace=True)
                expmap = [[f[1]['%s_alpha'%joint], f[1]['%s_beta'%joint], f[1]['%s_gamma'%joint]] for f in r.iterrows()] # Make sure the columns are organized in xyz order
rot_order = track.skeleton[joint]['order']
#euler_rots = [Rotation(f, 'expmap').to_euler(True, rot_order) for f in expmap] # Convert the exp maps to eulers
euler_rots = [expmap2euler(f, rot_order, True) for f in expmap] # Convert the exp maps to eulers
# Create the corresponding columns in the new DataFrame
euler_df['%s_%srotation'%(joint, rot_order[0])] = pd.Series(data=[e[0] for e in euler_rots], index=euler_df.index)
euler_df['%s_%srotation'%(joint, rot_order[1])] = pd.Series(data=[e[1] for e in euler_rots], index=euler_df.index)
euler_df['%s_%srotation'%(joint, rot_order[2])] = pd.Series(data=[e[2] for e in euler_rots], index=euler_df.index)
new_track = track.clone()
new_track.values = euler_df
Q.append(new_track)
return Q
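# Illustrative usage sketch of MocapParameterizer (assumes `tracks` is a list of
# pymo MocapData objects, e.g. parsed from BVH files):
#
#     parameterizer = MocapParameterizer('expmap')
#     exp_tracks = parameterizer.fit_transform(tracks)      # euler -> exponential map
#     euler_tracks = parameterizer.inverse_transform(exp_tracks)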
class Mirror(BaseEstimator, TransformerMixin):
def __init__(self, axis="X", append=True):
"""
Mirrors the data
"""
self.axis = axis
self.append = append
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
print("Mirror: " + self.axis)
Q = []
if self.append:
for track in X:
Q.append(track)
for track in X:
channels = []
titles = []
if self.axis == "X":
signs = np.array([1,-1,-1])
if self.axis == "Y":
signs = np.array([-1,1,-1])
if self.axis == "Z":
signs = np.array([-1,-1,1])
euler_df = track.values
# Create a new DataFrame to store the exponential map rep
new_df = pd.DataFrame(index=euler_df.index)
# Copy the root positions into the new DataFrame
rxp = '%s_Xposition'%track.root_name
ryp = '%s_Yposition'%track.root_name
rzp = '%s_Zposition'%track.root_name
            new_df[rxp] = pd.Series(data=-signs[0]*euler_df[rxp], index=new_df.index)
import pandas as pd
import geopandas as gpd
from os import listdir, rename, path, remove, mkdir
from os.path import isfile, join, getsize, exists
from netCDF4 import Dataset
import time
import numpy as np
import sys
import calendar
import datetime as dt
import re
from socket import timeout
import subprocess
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.pyplot as plt
import urllib
import requests, json
from requests.auth import HTTPBasicAuth
import xmltodict
from shapely import wkt
'''
Module: s5p_no2_tools.py
============================================================================================
Disclaimer: The code is for demonstration purposes only. Users are responsible to check for
accuracy and revise to fit their objective.
nc_to_df adapted from read_tropomi_no2_and_dump_ascii.py by <NAME>, 2015
of NASA ARSET
Purpose of original code: To print all SDS from an TROPOMI file
Modified by <NAME> & <NAME>, May 10 2019 to read TROPOMI data
Modified by <NAME>, May 8, 2020 to as steps 1 and 2 of pipeline to process
TROPOMI NO2 data
============================================================================================
'''
def create_project(project_name='default'):
"""
Purpose: Create project subfolder
Parameters:
project_name (string): subfolder to create
Returns:
project_name (string)
"""
if not exists(project_name):
try:
mkdir(project_name)
except OSError:
print (f"Creation of the directory {project_name} failed.")
return ''
else:
print (f"Successfully created the directory {project_name}.")
return project_name
else:
print(f"Directory {project_name} exists.")
return project_name
def get_place_boundingbox(place_gdf, buffer):
"""
Purpose: Determine the bounding box of a place GeoDataFrame (polygon)
Parameters:
place_gdf (GeoDataFrame): place GeoDataFrame to get bounding box for. This should be
a Level 0 polygon from GADM.
buffer (int): buffer in miles to add to place geometry
Returns:
bbox (GeoDataFrame): GeoDataFrame containing the bounding box as geometry
"""
bbox = gpd.GeoDataFrame(geometry=place_gdf['geometry'].buffer(buffer).envelope, crs=place_gdf.crs)\
.reset_index()
return bbox
def filter_swath_set(swath_set_gdf, place_gdf):
"""
Purpose: Reduce the number of swaths based on place constraint.
Parameters:
swath_set_gdf (GeoDataFrame): A GeoDataFrame output from sending a query to the
sentinel_api_query() function. This GeoDataFrame contains
a geometry for the swath. This geometry should contain
the place_gdf geometry below.
place_gdf (GeoDataFrame): A GeoDataFrame that should be contained within the
swath_set_gdf geometry.
Returns:
filtered_gdf (GeoDataFrame): A subset of swath_set_gdf representing geometries that
contain the place_gdf geometry.
"""
filtered_gdf = gpd.sjoin(swath_set_gdf, place_gdf, how='right', op='contains').reset_index()
filtered_gdf = filtered_gdf.drop(columns=['level_0','index_left','index'])
return filtered_gdf
def geometry_to_wkt(place_gdf):
"""
Purpose: Sentinel 5P Data Access hub requires a constraining polygon filter to
retrieve a smaller number of satellite image swaths.
Parameters:
place_gdf (GeoDataFrame): Target place for obtaining NO2 levels. Place GDF should be
a simple, GADM level 0 polygon.
Returns:
wkt_string (string): string containing polygon vertices in WKT format
"""
# get geometry convex hull and simplify
geometry = place_gdf.reset_index()['geometry'].convex_hull.simplify(tolerance=0.05)
# convert to WKT
wkt_string = wkt.dumps(geometry[0])
return wkt_string
def date_from_week(weekstring='2019-W01'):
d = weekstring
r = dt.datetime.strptime(d + '-1', "%Y-W%W-%w")
return r
def add_days(start, numDays=1):
end = start + dt.timedelta(days=numDays)
startDate = start.strftime("%Y-%m-%d")
endDate = end.strftime("%Y-%m-%d")
return [startDate, endDate]
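# Illustrative usage sketch: turn an ISO week label into a [start, end] date
# window for the Sentinel-5P query (the week string is an example value):
#
#     week_start = date_from_week('2019-W23')
#     start_date, end_date = add_days(week_start, numDays=7)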
def nc_to_df(ncfile):
"""
Purpose: This converts a TROPOMI NO2 file to a Pandas DataFrame.
Notes:
This was adapted from read_tropomi_no2_and_dump_ascii.py by <NAME>, 2015
of NASA ARSET.
Parameters:
ncfile: NetCD4 file from Copernicus S5P Open Data Hub
Returns:
dataframe: data from NetCD4 file
"""
try:
f = open(ncfile, 'r')
except OSError:
print('cannot open', ncfile)
df = pd.DataFrame()
# read the data
if 'NO2___' in ncfile and 'S5P' in ncfile:
tic = time.perf_counter()
FILENAME = ncfile
print(ncfile+' is a TROPOMI NO2 file.')
#this is how you access the data tree in an NetCD4 file
SDS_NAME='nitrogendioxide_tropospheric_column'
file = Dataset(ncfile,'r')
grp='PRODUCT'
ds=file
grp='PRODUCT'
lat= ds.groups[grp].variables['latitude'][0][:][:]
lon= ds.groups[grp].variables['longitude'][0][:][:]
data= ds.groups[grp].variables[SDS_NAME]
#get necessary attributes
fv=data._FillValue
#get scan time and turn it into a vector
scan_time= ds.groups[grp].variables['time_utc']
# scan_time=geolocation['Time'][:].ravel()
year = np.zeros(lat.shape)
mth = np.zeros(lat.shape)
doy = np.zeros(lat.shape)
hr = np.zeros(lat.shape)
mn = np.zeros(lat.shape)
sec = np.zeros(lat.shape)
strdatetime = np.zeros(lat.shape)
for i in range(0,lat.shape[0]):
t = scan_time[0][i].split('.')[0]
            t2 = dt.datetime.strptime(t, '%Y-%m-%dT%H:%M:%S')
            # scan times are UTC, so build the Unix timestamp with calendar.timegm
            # rather than the platform-specific strftime("%s")
            t3 = calendar.timegm(t2.timetuple())
#y = t2.year
#m = t2.month
#d = t2.day
#h = t2.hour
#m = t2.minute
#s = t2.second
#year[i][:] = y
#mth[i][:] = m
#doy[i][:] = d
#hr[i][:] = h
#mn[i][:] = m
#sec[i][:] = s
strdatetime[i][:] = t3
vlist = list(file[grp].variables.keys())
#df['Year'] = year.ravel()
#df['Month'] = mth.ravel()
#df['Day'] = doy.ravel()
#df['Hour'] = hr.ravel()
#df['Minute'] = mn.ravel()
#df['Second'] = sec.ravel()
df['UnixTimestamp'] = strdatetime.ravel()
df['DateTime'] = pd.to_datetime(df['UnixTimestamp'], unit='s')
df[['Date','Time']] = df['DateTime'].astype(str).str.split(' ',expand=True)
# This for loop saves all of the SDS in the dictionary at the top
# (dependent on file type) to the array (with titles)
for i in range(0,len(vlist)):
SDS_NAME=vlist[(i)] # The name of the sds to read
#get current SDS data, or exit program if the SDS is not found in the file
#try:
sds=ds.groups[grp].variables[SDS_NAME]
if len(sds.shape) == 3:
print(SDS_NAME,sds.shape)
# get attributes for current SDS
if 'qa' in SDS_NAME:
scale=sds.scale_factor
else: scale = 1.0
fv=sds._FillValue
# get SDS data as a vector
data=sds[:].ravel()
# The next few lines change fill value/missing value to NaN so
# that we can multiply valid values by the scale factor,
# then back to fill values for saving
data=data.astype(float)
data=(data)*scale
data[np.isnan(data)]=fv
data[data==float(fv)]=np.nan
df[SDS_NAME] = data
toc = time.perf_counter()
elapsed_time = toc-tic
print("Processed "+ncfile+" in "+str(elapsed_time/60)+" minutes")
else:
raise NameError('Not a TROPOMI NO2 file name.')
return df
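# Illustrative usage sketch: the filename below is made up; any S5P NO2 product
# path works as long as it contains 'S5P' and 'NO2___'.
#
#     no2_df = nc_to_df('downloads/S5P_OFFL_L2__NO2____20190601_example.nc')
#     no2_df = no2_df.dropna(subset=['nitrogendioxide_tropospheric_column'])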
def polygon_filter(input_df, filter_gdf):
"""
Purpose: This removes records from the TROPOMI NO2 Pandas DataFrame that
is not found within the filter polygons
Parameters:
input_df: Pandas DataFrame containing NO2 data coming from nc_to_df()
filter_gdf: GeoPandas GeoDataFrame containing geometries to constrain
NO2 records. Be sure to create the spatial index for filter_gdf to
speed up sjoin operation. You can do this by calling
filter_gdf.sindex before feeding filter_gdf into:
polygon_filter(input_df=input_df, filter_gdf=filter_gdf)
Returns:
geodataframe: Filtered GeoPandas GeoDataFrame
"""
print('To speed up the polygon_filter() operation, did you create the spatial index for filter_gdf?')
tic = time.perf_counter()
output_gdf = pd.DataFrame()
print('Processing input dataframe...')
crs = filter_gdf.crs
# 1. Convert input_df to gdf
gdf1 = gpd.GeoDataFrame(input_df, geometry=gpd.points_from_xy(input_df.longitude, input_df.latitude),crs=crs)
print('Original NO2 DataFrame length:', len(gdf1))
# 2. Find out intersection between African Countries GeoDataFrames (geometry) and
# NO2 GeoDataFrames using Geopandas sjoin (as GeoDataFrame, gdf2)
sjoin_gdf = gpd.sjoin(gdf1, filter_gdf, how='inner', op='intersects')
print('Filtered NO2 GeoDataFrame length:', len(sjoin_gdf))
toc = time.perf_counter()
elapsed_time = toc-tic
print("Processed NO2 DataFrame sjoin in "+str(elapsed_time/60)+" minutes")
return sjoin_gdf
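# Illustrative usage sketch (variable names are assumptions): build the spatial
# index first so the sjoin inside polygon_filter stays fast.
#
#     countries_gdf.sindex
#     no2_gdf = polygon_filter(input_df=no2_df, filter_gdf=countries_gdf)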
def get_filename_from_cd(cd):
"""
Purpose: Get filename from content-disposition (cd)
Parameters:
cd (string): content-disposition
Returns:
fname[0] (string): filename
"""
if not cd:
return None
fname = re.findall('filename=(.+)', cd)
if len(fname) == 0:
return None
return fname[0]
def download_nc_file(url, auth, savedir, logging, refresh):
"""
Purpose: Download NetCD4 files from URL
Parameters:
url: string, download url obtained from Sentinel 5P Open Data Hub search results
auth: dictionary of '<PASSWORD>' and 'password'
savedir: string, path to save NetCD4 files
logging: boolean, turn logging on or off
refresh: boolean, overwrite previously downloaded files (helps save time if False)
Returns:
filename: string filename of NetCD4 file
"""
user = auth['user']
password = auth['password']
filename = 'temp.nc'
logfile = 'nc.log'
try:
refresh=refresh
headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
}
tic = time.perf_counter()
with open(savedir+'/'+filename, 'wb') as f:
response = requests.get(url, auth=(user, password), stream=True, headers=headers)
filename0 = get_filename_from_cd(response.headers.get('content-disposition')).replace('"','')
if path.exists(savedir+'/'+filename0):
print('File '+filename0+' exists.')
if refresh==False:
filename0_size = getsize(savedir+'/'+filename0)
print('Filename size:', filename0_size,' bytes')
if filename0_size > 0:
remove(savedir+'/'+filename)
return filename0
print('Downloading '+filename0+'...')
total = response.headers.get('content-length')
if total is None:
f.write(response.content)
else:
downloaded = 0
total = int(total)
for data in response.iter_content(chunk_size=max(int(total/1000), 1024*1024)):
downloaded += len(data)
f.write(data)
done = int(50*downloaded/total)
sys.stdout.write('\r[{}{}]'.format('█' * done, '.' * (50-done)))
sys.stdout.flush()
if logging==True:
with open(logfile, 'a+') as l:
# Move read cursor to the start of file.
l.seek(0)
# If file is not empty then append '\n'
data = l.read(100)
if len(data) > 0 :
l.write("\n")
# Append text at the end of file
l.write(filename0)
sys.stdout.write('\n')
rename(savedir+'/'+filename, savedir+'/'+filename0)
toc = time.perf_counter()
elapsed_time = toc-tic
print('Success: Saved '+filename0+' to '+savedir+'.')
print('Download time, seconds: '+str(elapsed_time))
delays = [7, 4, 6, 2, 10, 15, 19, 23]
delay = np.random.choice(delays)
print('Delaying for '+str(delay)+' seconds...')
time.sleep(delay)
return filename0
    except Exception as exc:
        print('Download failed:', exc)
def batch_download_nc_files(auth, savedir, url_file, numfiles, logging, refresh):
"""
Purpose: For batch downloading nc files from the Copernicus S5P Data Access Hub
Parameters:
auth (dict): authentication dictionary, {'user':'myusername', 'password':'<PASSWORD>'}
savedir (string): directory used to save NetCD4 files
url_file (string): file containing NetCD4 download URLS
numfiles (int)
logging (bool)
refresh (bool)
Returns:
df (DataFrame)
"""
savedir=savedir
url_file = url_file
    df = pd.read_csv(url_file)
import json
import requests
import pandas as pd
from datetime import datetime, timedelta
def get_data(state):
url = "http://sjc.salvar.cemaden.gov.br/resources/graficos/interativo/getJson2.php"
querystring = {'uf': state}
headers = {
'cache-control': 'no-cache',
# 'postman-token': '<PASSWORD>'
}
response = requests.request(
'GET', url, headers=headers, params=querystring)
data_df = json.loads(response.text)
df = pd.DataFrame(data_df)
#!Filter
df['date'] = pd.to_datetime(
df['datahoraUltimovalor'], format='%d/%m/%y %H:%M', errors='ignore').dt.tz_localize('UTC')
date_now = pd.to_datetime(
datetime.utcnow()-timedelta(minutes=20))
    df = df[df['date'] >= pd.Timestamp(date_now, tz="UTC")]
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
# In[2]:
df = pd.read_csv('processed.csv.gz')
# In[3]:
df.head()
# In[4]:
df.info()
# In[5]:
df = df.drop(columns=df.columns[0])
# In[6]:
df.head()
# In[7]:
df.groupby('vaderSentimentLabel').size()
# In[8]:
import matplotlib.pyplot as plt
# In[9]:
df.groupby('vaderSentimentLabel').count().plot.bar()
plt.show()
# In[10]:
df.groupby('ratingSentimentLabel').size()
# In[11]:
df.groupby('ratingSentimentLabel').count().plot.bar()
plt.show()
# In[12]:
df.groupby('ratingSentiment').size()
# In[13]:
positive_vader_sentiments = df[df.ratingSentiment == 2]
positive_string = []
for s in positive_vader_sentiments.cleanReview:
positive_string.append(s)
positive_string = pd.Series(positive_string).str.cat(sep=' ')
# In[14]:
from wordcloud import WordCloud
wordcloud = WordCloud(width=2000,height=1000,max_font_size=200).generate(positive_string)
plt.imshow(wordcloud,interpolation='bilinear')
plt.show()
# In[15]:
for s in positive_vader_sentiments.cleanReview[:20]:
if 'side effect' in s:
print(s)
# In[16]:
negative_vader_sentiments = df[df.ratingSentiment == 1]
negative_string = []
for s in negative_vader_sentiments.cleanReview:
negative_string.append(s)
negative_string = pd.Series(negative_string).str.cat(sep=' ')
# In[17]:
from wordcloud import WordCloud
wordcloud = WordCloud(width=2000,height=1000,max_font_size=200).generate(negative_string)
plt.imshow(wordcloud,interpolation='bilinear')
plt.axis('off')
plt.show()
# In[18]:
neutral_vader_sentiments = df[df.ratingSentiment == 0]
neutral_string = []
for s in neutral_vader_sentiments.cleanReview:
neutral_string.append(s)
neutral_string = pd.Series(neutral_string).str.cat(sep=' ')
# In[19]:
from wordcloud import WordCloud
wordcloud = WordCloud(width=2000,height=1000,max_font_size=200).generate(neutral_string)
plt.imshow(wordcloud,interpolation='bilinear')
plt.axis('off')
plt.show()
# In[20]:
for s in neutral_vader_sentiments.cleanReview[:20]:
if 'side effect' in s:
print(s)
# In[21]:
from sklearn.feature_extraction.text import TfidfVectorizer
# In[22]:
tfidf = TfidfVectorizer(stop_words='english',ngram_range=(1,2))
features = tfidf.fit_transform(df.cleanReview)
labels = df.vaderSentiment
# In[23]:
features.shape
# In[24]:
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
# In[25]:
x_train,x_test,y_train,y_test = train_test_split(df['cleanReview'],df['ratingSentimentLabel'],random_state=0)
# In[26]:
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
# In[27]:
models = [RandomForestClassifier(n_estimators=200,max_depth=3,random_state=0),LinearSVC(),MultinomialNB(),LogisticRegression(random_state=0,solver='lbfgs',max_iter=2000,multi_class='auto')]
CV = 5
cv_df = pd.DataFrame(index=range(CV * len(models)))
entries = []
for model in models:
model_name = model.__class__.__name__
accuracies = cross_val_score(model,features,labels,scoring='accuracy',cv=CV)
for fold_idx,accuracy in enumerate(accuracies):
entries.append((model_name,fold_idx,accuracy))
cv_df = pd.DataFrame(entries,columns=['model_name','fold_idx','accuracy'])
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Index,
Interval,
IntervalIndex,
Timedelta,
Timestamp,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
@pytest.fixture(
params=[
(Index([0, 2, 4]), Index([1, 3, 5])),
(Index([0.0, 1.0, 2.0]), Index([1.0, 2.0, 3.0])),
(timedelta_range("0 days", periods=3), timedelta_range("1 day", periods=3)),
(date_range("20170101", periods=3), date_range("20170102", periods=3)),
(
date_range("20170101", periods=3, tz="US/Eastern"),
date_range("20170102", periods=3, tz="US/Eastern"),
),
],
ids=lambda x: str(x[0].dtype),
)
def left_right_dtypes(request):
"""
Fixture for building an IntervalArray from various dtypes
"""
return request.param
class TestAttributes:
@pytest.mark.parametrize(
"left, right",
[
(0, 1),
(Timedelta("0 days"), Timedelta("1 day")),
(Timestamp("2018-01-01"), Timestamp("2018-01-02")),
(
Timestamp("2018-01-01", tz="US/Eastern"),
Timestamp("2018-01-02", tz="US/Eastern"),
),
],
)
@pytest.mark.parametrize("constructor", [IntervalArray, IntervalIndex])
def test_is_empty(self, constructor, left, right, closed):
# GH27219
tuples = [(left, left), (left, right), np.nan]
expected = np.array([closed != "both", False, False])
result = constructor.from_tuples(tuples, closed=closed).is_empty
tm.assert_numpy_array_equal(result, expected)
class TestMethods:
@pytest.mark.parametrize("new_closed", ["left", "right", "both", "neither"])
def test_set_closed(self, closed, new_closed):
# GH 21670
array = IntervalArray.from_breaks(range(10), closed=closed)
result = array.set_closed(new_closed)
expected = IntervalArray.from_breaks(range(10), closed=new_closed)
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
            Interval(0, 1, closed="right"),
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2022/5/16 18:36
Desc: 新股和风险警示股
新浪-行情中心-沪深股市-次新股
http://vip.stock.finance.sina.com.cn/mkt/#new_stock
东方财富网-行情中心-沪深个股-风险警示板
http://quote.eastmoney.com/center/gridlist.html#st_board
"""
import math
import pandas as pd
import requests
def stock_zh_a_st_em() -> pd.DataFrame:
"""
东方财富网-行情中心-沪深个股-风险警示板
http://quote.eastmoney.com/center/gridlist.html#st_board
:return: 风险警示板
:rtype: pandas.DataFrame
"""
url = 'http://40.push2.eastmoney.com/api/qt/clist/get'
params = {
'pn': '1',
'pz': '2000',
'po': '1',
'np': '1',
'ut': 'bd1d9ddb04089700cf9c27f6f7426281',
'fltt': '2',
'invt': '2',
'fid': 'f3',
'fs': 'm:0 f:4,m:1 f:4',
'fields': 'f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152',
'_': '1631107510188',
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data']['diff'])
temp_df.reset_index(inplace=True)
temp_df['index'] = range(1, len(temp_df)+1)
temp_df.columns = [
'序号',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'换手率',
'市盈率-动态',
'量比',
'_',
'代码',
'_',
'名称',
'最高',
'最低',
'今开',
'昨收',
'_',
'_',
'_',
'市净率',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
]
temp_df = temp_df[[
'序号',
'代码',
'名称',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'最高',
'最低',
'今开',
'昨收',
'量比',
'换手率',
'市盈率-动态',
'市净率',
]]
temp_df['最新价'] = pd.to_numeric(temp_df['最新价'], errors="coerce")
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'], errors="coerce")
temp_df['涨跌额'] = pd.to_numeric(temp_df['涨跌额'], errors="coerce")
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'], errors="coerce")
temp_df['成交额'] = pd.to_numeric(temp_df['成交额'], errors="coerce")
    temp_df['振幅'] = pd.to_numeric(temp_df['振幅'], errors="coerce")
import matplotlib.pyplot as pl
import anndata as ad
import pandas as pd
import numpy as np
import scanpy as sc
import scvelo as scv
from scipy.sparse import issparse
import matplotlib.gridspec as gridspec
from scipy.stats import gaussian_kde, spearmanr, pearsonr
from goatools.obo_parser import GODag
from goatools.anno.genetogo_reader import Gene2GoReader
from goatools.goea.go_enrichment_ns import GOEnrichmentStudyNS
import seaborn as sns
import re
import os
import gzip
import mygene
from csv import Sniffer
signatures_path_= os.path.join(os.path.dirname(os.path.realpath(__file__)), 'metadata/')
def get_genefamily_percentage(adata, key='MT-', start=True, name='mito'):
keys = key if isinstance(key, list) else [key, '____ignore____']
if start:
family_genes = np.logical_or(*[adata.var_names.str.startswith(k) for k in keys])
else:
family_genes = np.logical_or(*[adata.var_names.str.endswith(k) for k in keys])
if issparse(adata.X):
adata.obs['percent_'+name] = np.sum(
adata[:, family_genes].X, axis=1).A1 / np.sum(adata.X, axis=1).A1
else:
adata.obs['percent_'+name] = np.sum(
adata[:, family_genes].X, axis=1) / np.sum(adata.X, axis=1)
def get_mito_percentage(adata, species='human'):
key = 'MT-' if species == 'human' else 'mt-'
get_genefamily_percentage(adata, key=key, start=True, name='mito')
def get_ribo_percentage(adata, species='human'):
key = specify_genes(['RPS', 'RPL'], species=species)
get_genefamily_percentage(adata, key=key, start=True, name='ribo')
def get_hemo_percentage(adata, species='human'):
key = specify_genes(['HBA', 'HBB'], species=species)
get_genefamily_percentage(adata, key=key, start=True, name='hemo')
def score_cell_cycle(adata, signatures_path=signatures_path_, species='human'):
adatas = adata if isinstance(adata, list) else [adata]
for i in range(len(adatas)):
adata = adatas[i]
# score cell cycle
# cc score with genes from Kowalczyk, <NAME>., et al. “Single-Cell RNA-Seq Reveals Changes in Cell Cycle and Differentiation Programs upon Aging of Hematopoietic Stem Cells.” Genome Research, vol. 25, no. 12, 2015, pp. 1860–72, doi:10.1101/gr.192237.115.
cell_cycle_genes = [x.strip() for x in open(signatures_path+'/regev_lab_cell_cycle_genes.txt')]
cell_cycle_genes = [x for x in cell_cycle_genes if x in adata.var_names]
# Split into 2 lists
s_genes = cell_cycle_genes[:43]
g2m_genes = cell_cycle_genes[43:]
# score
sc.tl.score_genes_cell_cycle(adata, s_genes=s_genes, g2m_genes=g2m_genes)
adatas[i] = adata
return adatas[0] if len(adatas)==1 else adatas
def score_smillie_str_epi_imm(adata, signatures_path=signatures_path_, species='human'):
tab=pd.read_excel(signatures_path+'/colonoid_cancer_uhlitz_markers_revised.xlsx', skiprows=1, index_col=0)
score_genes(adata, np.array(tab.index[tab['Epithelial']==1].values, dtype='str'), score_name='epi_score', species=species)
score_genes(adata, np.array(tab.index[tab['Stromal']==1].values, dtype='str'), score_name='str_score', species=species)
score_genes(adata, np.array(tab.index[tab['Immune']==1].values, dtype='str'), score_name='imm_score', species=species)
def score_tumor_immune_cells(adata, signatures_path=signatures_path_, species='human'):
# ImSigGenes immune tumor signatures
tab=pd.read_excel(signatures_path+'/ImSigGenes_immunetumor.xlsx', skiprows=2, index_col=1)
annot = dict()
for ct in pd.unique(tab.Signature):
annot[ct] = tab[tab.Signature==ct].index.values
for ct in annot.keys():
score_genes(adata, annot[ct], score_name=ct, species=species)
def calc_qc_scvelo(adata):
adatas = adata if isinstance(adata, list) else [adata]
for adata in adatas:
# obs qc
adata.obs['ucounts'] = rsum(adata.layers['unspliced'], axis=1)
adata.obs['scounts'] = rsum(adata.layers['spliced'], axis=1)
adata.obs['ufeatures'] = rsum(adata.layers['unspliced']>0, axis=1)
adata.obs['sfeatures'] = rsum(adata.layers['spliced']>0, axis=1)
# var qc
adata.var['ucounts'] = rsum(adata.layers['unspliced'], axis=0)
adata.var['scounts'] = rsum(adata.layers['spliced'], axis=0)
adata.var['ucells'] = rsum(adata.layers['unspliced']>0, axis=0)
adata.var['scells'] = rsum(adata.layers['spliced']>0, axis=0)
def calc_qc(adata, extended_genesets=False, species='detect'):
adatas = adata if isinstance(adata, list) else [adata]
for adata in adatas:
# qc counts
adata.obs['ncounts'] = rsum(adata.X, axis=1)
adata.obs['ngenes'] = rsum(adata.X>0, axis=1)
adata.var['ncounts'] = rsum(adata.X, axis=0)
adata.var['ncells'] = rsum(adata.X>0, axis=0)
species = detect_organism(adata) if species == 'detect' else species
# gene modules
# mitochondrial genes
get_mito_percentage(adata, species)
# ribosomal genes
get_ribo_percentage(adata, species)
# hemoglobin genes
get_hemo_percentage(adata, species)
if extended_genesets:
            if species != 'human':
                raise ValueError(species + ' species is not known. Please do not use extended_genesets=True.')
# interferon genes, immune response
get_genefamily_percentage(adata, key='IFIT', start=True, name='ifit')
# Cell adhesion molecules genes
get_genefamily_percentage(adata, key='CAM', start=False, name='cam')
# HLA genes encode MHC I and MHC II
get_genefamily_percentage(adata, key='HLA-', start=True, name='hla') # genome specific sometimes!!!
# S100 genes, saw them often in organoids
get_genefamily_percentage(adata, key='S100', start=True, name='s100')
# FOX genes, TFs
get_genefamily_percentage(adata, key='FOX', start=True, name='fox')
# Heat shock protein genes
get_genefamily_percentage(adata, key='HSP', start=True, name='heatshock')
# ABC transporter genes, can lead to multi-drug resistance in cancer
get_genefamily_percentage(adata, key='ABC', start=True, name='abc')
def specify_genes(genes, species='human'):
genes = genes if isinstance(genes, list) else list(genes) if isinstance(genes, np.ndarray) else [genes]
    if species == 'human':
        return [x.upper() for x in genes]
    elif species == 'mouse':
        return [x.capitalize() for x in genes]
else:
raise ValueError('Species '+species+' not known.')
def score_genes(adata, gene_list, score_name, species='human', **kwargs):
gene_list_ = specify_genes(gene_list, species=species)
sc.tl.score_genes(adata, gene_list_, score_name=score_name)
def score_hallmarks(adata, subset='organoid', signatures_path=signatures_path_, species='human'):
sc.settings.verbosity = 0
# subset can be a list of hallmarks, 'organoid' (), 'CRC' (~18) or 'all' (50 scores)
tab = pd.read_csv(signatures_path + 'h.all.v6.2.symbols.gmt', sep='\t', index_col=0, header=None).drop(1, axis=1).T
hallsigs={hallmark : tab[hallmark][~pd.isna(tab[hallmark])].values for hallmark in tab.columns}
if isinstance(subset, list):
selection = subset
elif subset == 'organoid':
selection = ['HALLMARK_DNA_REPAIR', 'HALLMARK_WNT_BETA_CATENIN_SIGNALING', 'HALLMARK_APOPTOSIS']
    elif subset == 'CRC':  # ~18 CRC-relevant hallmarks
selection = ['HALLMARK_DNA_REPAIR', 'HALLMARK_WNT_BETA_CATENIN_SIGNALING', 'HALLMARK_APOPTOSIS',
'HALLMARK_NOTCH_SIGNALING', 'HALLMARK_TNFA_SIGNALING_VIA_NFKB', 'HALLMARK_HYPOXIA', 'HALLMARK_TGF_BETA_SIGNALING',
                     'HALLMARK_MITOTIC_SPINDLE', 'HALLMARK_MTORC1_SIGNALING', 'HALLMARK_PI3K_AKT_MTOR_SIGNALING', 'HALLMARK_PROTEIN_SECRETION',
'HALLMARK_G2M_CHECKPOINT', 'HALLMARK_EPITHELIAL_MESENCHYMAL_TRANSITION', 'HALLMARK_OXIDATIVE_PHOSPHORYLATION',
'HALLMARK_P53_PATHWAY', 'HALLMARK_ANGIOGENESIS', 'HALLMARK_KRAS_SIGNALING_UP', 'HALLMARK_KRAS_SIGNALING_DN',
'HALLMARK_GLYCOLYSIS']
elif subset == 'all':
selection = hallsigs.keys()
else:
        raise ValueError('Please select a valid subset of hallmarks to use. You can also choose "all".')
for hm in selection:
score_genes(adata, hallsigs[hm], score_name=hm, species=species)
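# Hedged usage sketch (added): score the small 'organoid' hallmark subset and
# visualise one resulting obs column. The UMAP call assumes that neighbors/UMAP
# have already been computed for `adata`.
def _example_score_hallmarks(adata):
    score_hallmarks(adata, subset='organoid', species='human')
    sc.pl.umap(adata, color='HALLMARK_WNT_BETA_CATENIN_SIGNALING')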
def lin_corr_adata(adata, x, keys, method='spearman'):
"""Linearly correlates features (genes/obs_keys) of adata with a given array.
Computes pearson linear correlation (r and p value) for each selected feature
with the given values in x.
----------
adata: An adata object.
x: numeric numpy array
For example x = adata.obsm['X_diffmap'][:,1] or another gene's
expression.
keys: Either a list of genes or a list of adata.obs.columns.
method: Either 'spearman' or 'pearson'.
Returns
-------
df: A pandas DataFrame
The dataframe has genes as index and columns pearson_r and pearson_p. It
is sorted by correlation coefficient (pearson_r).
"""
# input keys may be list or str, make list
keys = [keys] if isinstance(keys, str) else keys
# select correlation method
if method == 'spearman':
correlate = spearmanr
    elif method == 'pearson':
correlate = pearsonr
else:
raise ValueError(f'Method {method} not valid (pearson or spearman only).')
# feature set
if all(np.isin(keys, adata.obs.columns)):
feature_type = 'obs_keys'
Y = adata.obs[keys].values
    elif any(np.isin(keys, adata.var_names)):
        feature_type = 'genes'
        # subset to the requested genes so that column i corresponds to keys[i]
        Xk = adata[:, keys].X
        Y = Xk.A if issparse(Xk) else Xk
else:
raise ValueError('Keys must be list of genes or adata.obs keys.')
# linearly correlated
lincors = []
for i, key in enumerate(keys):
y = Y[:, i]
r, p = correlate(x, y)
lincors.append([key, r, p])
# format result as pandas.DataFrame
df = pd.DataFrame(lincors, columns=[feature_type, f'{method}_r', f'{method}_p']).set_index(feature_type)
df = df.sort_values(f'{method}_r', ascending=False) # sort by correlation
return df
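# Hedged usage sketch (added): rank all genes by their Spearman correlation with
# the second diffusion component. Assumes sc.tl.diffmap has already been run.
def _example_lin_corr_adata(adata):
    x = adata.obsm['X_diffmap'][:, 1]
    df = lin_corr_adata(adata, x, keys=list(adata.var_names), method='spearman')
    return df.head(10)  # strongest positively correlated genes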
def kde_trajectory(adata, key, groupby, velocity=False, rug_keys=[], component=1,
figsize=[15,5], n_convolve=10, ax=None, show=True, n_eval=200,
range_percs=[0,100], linewidth=4, rug_alpha=0.1,
n=19, ylim=30, scale=8):
X = adata.obsm['X_'+key] if 'X_'+key in adata.obsm.keys() else adata.obsm[key] if key in adata.obsm.keys() else adata.obs[key]
X = X[:, component] if len(X.shape)>1 else X
if velocity:
V = adata.obsm['velocity_'+key] if 'velocity_'+key in adata.obsm.keys() else adata.obsm[key] if key in adata.obsm.keys() else adata.obs[key]
V = V[:, component] if len(V.shape)>1 else V
ax = pl.figure(figsize=figsize).gca() if ax is None else ax
xmin = np.percentile(X, range_percs[0])
xmax = np.percentile(X, range_percs[1])
ev = np.linspace(xmin, xmax, n_eval)
# plot density per group
for i, cond in enumerate(np.sort(pd.unique(adata.obs[groupby]))):
mask = adata.obs[groupby] == cond
kernel = gaussian_kde(X[mask])
ax.plot(ev, kernel(ev), label=cond, linewidth=linewidth, color=adata.uns[groupby+'_colors'][i])
if velocity:
# arrow projections
edges = np.linspace(ev[0], ev[-1], n)
bins = [(edges[k]<X) & (X<edges[k+1]) for k in range(n-1)]
in_bin = bins[2]
xs = np.array([np.mean(X[mask & in_bin]) for in_bin in bins])
ys = np.array([np.mean(kernel(X[mask & in_bin])) for in_bin in bins])
vs = np.array([np.mean(V[mask & in_bin]) if y>ylim else 0 for y, in_bin in zip(ys, bins)])
vs = vs / np.max(np.abs(vs))
# pl.plot(X[mask & in_bin], kernel(X[mask & in_bin]), label=cond, linewidth=linewidth, color='red')
# pl.quiver(X[mask & in_bin], kernel(X[mask & in_bin]), V[mask & in_bin], 0)
ix = np.abs(vs) > 0
ax.quiver(xs[ix], ys[ix], vs[ix], 0 , zorder=100, scale_units='width', scale=scale, color=adata.uns[groupby+'_colors'][i])
# plot categorical annotations as rug
rug_y = ax.get_ylim()[1]/10
rug_keys = rug_keys if isinstance(rug_keys, list) else [rug_keys]
for i, rug in enumerate(rug_keys):
for j, cond in enumerate(np.sort(pd.unique(adata.obs[rug]))):
mask = (adata.obs[rug] == cond) & (X>xmin) & (X<xmax)
plot = ax.plot(X[mask], np.zeros(np.sum(mask)) - rug_y * (i+1), '|', color=adata.uns[rug+'_colors'][j], ms=10, alpha=rug_alpha)
ax.set_xticks([])
ax.set_xlabel(key + ' component '+str(component))
ax.set_ylabel(f'Cell density (KDE) by {groupby}')
ax.set_yticks(ax.get_yticks()[ax.get_yticks()>=0])
ax.axhline(y=0, c='k')
ax.legend()
if show:
pl.show()
else:
return
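# Hedged usage sketch (added): cell density along diffusion component 1, split by
# a categorical obs column, with per-sample rug marks. The column names
# ('condition', 'sample') and the presence of adata.uns['condition_colors'] and
# adata.uns['sample_colors'] are assumptions for illustration.
def _example_kde_trajectory(adata):
    kde_trajectory(adata, key='diffmap', groupby='condition', component=1,
                   rug_keys=['sample'], range_percs=[2, 98])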
def diffusion_analysis_(adata, groupby, species='human', component=1, corr_cutoff=0.1, figsize=[10,8], range_percs=[3,97], velocity_mode=None, show=True):
"""Performs a diffusion analysis on adata for a specific diffusion component.
    velocity_mode may be None, 'on density', 'average' or 'single'.
----------
adata: An adata object.
Returns
-------
None
"""
ckey = 'DC'+str(component)
    add_velocity_subplot = velocity_mode is not None and velocity_mode != 'on density'
# set layout
fig = pl.figure(constrained_layout=True, figsize=figsize)
widths = [1, 1, 1]
n_rows = 3 + add_velocity_subplot
heights = [1] * n_rows
spec = fig.add_gridspec(ncols=3, nrows=n_rows, width_ratios=widths,
height_ratios=heights)
ax0 = fig.add_subplot(spec[0, :])
kde_trajectory(adata, key='diffmap', groupby=groupby, range_percs=range_percs, ax=ax0,
show=False, component=component,
velocity=velocity_mode=='on density'
)
ax0.set_xlabel('diffusion pseudotime')
ax0.set_ylabel('cell density')
def add_annotation(row, keys, fig, df, name):
n_top=8
ax_0 = fig.add_subplot(spec[row, 0])
ax_1 = fig.add_subplot(spec[row, 1], sharey=ax_0)
ax_2 = fig.add_subplot(spec[row, 2], sharey=ax_0)
ax_0.set_axis_off()
ax_2.set_axis_off()
# Arrows
ax_0.annotate('', xy=(.4, 1), xytext=(.6, 1),
arrowprops=dict(facecolor='black', shrink=0.05), rotation=90)
ax_2.annotate('', xy=(.6, 1), xytext=(.4, 1),
arrowprops=dict(facecolor='black', shrink=0.05), rotation=90)
# Texts
neg_df = df['spearman_r'][df['spearman_r']<-corr_cutoff].iloc[::-1][:n_top]
pos_df = df['spearman_r'][df['spearman_r']>corr_cutoff][:n_top]
for i, hallmark in enumerate(neg_df.index):
ax_0.text(0.5, .8 - i/len(hallmarks), hallmark.replace('HALLMARK_','').replace('_',' '), ha='center', va='center')
for i, hallmark in enumerate(pos_df.index):
ax_2.text(0.5, .8 - i/len(hallmarks), hallmark.replace('HALLMARK_','').replace('_',' '), ha='center', va='center')
# Barplot
ax_1.barh([.8- i/10 for i in range(len(neg_df))], neg_df.values, align='center', height=0.08, color='tab:blue')
ax_1.barh([.8- i/10 for i in range(len(pos_df))], pos_df.values, align='center', height=0.08, color='tab:red')
ax_1.spines['right'].set_visible(False)
ax_1.spines['left'].set_visible(False)
ax_1.spines['top'].set_visible(False)
ax_1.set_yticks([])
m = np.max(np.abs(df['spearman_r']))
ax_1.set_xlim([-m,m])
ax_1.set_xlabel(f'correlation between diffusion axis \n and {name} expression \n (spearman R)')
ax_1.set_ylim([0,1])
### Pathways
# aggregate hallmarks
dfs = []
hallmarks = ['HALLMARK_ANGIOGENESIS', 'HALLMARK_APOPTOSIS', 'HALLMARK_COAGULATION', 'HALLMARK_COMPLEMENT',
'HALLMARK_IL2_STAT5_SIGNALING', 'HALLMARK_INFLAMMATORY_RESPONSE',
'HALLMARK_INTERFERON_ALPHA_RESPONSE', 'HALLMARK_INTERFERON_GAMMA_RESPONSE', 'HALLMARK_PI3K_AKT_MTOR_SIGNALING',
'HALLMARK_TGF_BETA_SIGNALING', 'HALLMARK_XENOBIOTIC_METABOLISM']
if not all(np.isin(hallmarks, adata.obs.keys())): score_hallmarks(adata, species=species, subset=hallmarks)
df_hallmarks = lin_corr_adata(adata, adata.obsm['X_diffmap'][:, component], hallmarks)
df_hallmarks = df_hallmarks[~pd.isna(df_hallmarks.spearman_r)]
add_annotation(-2, hallmarks, fig, df_hallmarks, 'signature score')
### Genes
df_genes = lin_corr_adata(adata, adata.obsm['X_diffmap'][:, component], adata.var_names)
df_genes = df_genes[~pd.isna(df_genes.spearman_r)]
add_annotation(-1, hallmarks, fig, df_genes, 'gene')
### velocities
if add_velocity_subplot:
ax1 = fig.add_subplot(spec[1, :], sharex=ax0)
groups = list(adata.obs[groupby].cat.categories)
colors = adata.uns[f'{groupby}_colors']
x = adata.obsm['X_diffmap'][:, component]
v = adata.obsm['velocity_diffmap'][:, component]
mask0 = (x>np.percentile(x, range_percs[0])) & (x<np.percentile(x, range_percs[1]))
if velocity_mode=='single':
for i, group in enumerate(groups):
mask = (adata.obs[groupby] == group) & mask0
ax1.quiver(x[mask], np.random.uniform(1-i, -i, x.shape)[mask], v[mask], np.zeros_like(v)[mask], color=colors[i], scale=0.4, edgecolor='k', linewidth = .5)
ax1.set_ylabel(f'RNA velocity\nby {groupby}')
else:
from scipy.interpolate import interp1d
n_evals = 10
xint=np.linspace(np.percentile(x, range_percs[0]), np.percentile(x, range_percs[1]), n_evals)
for i, group in enumerate(groups):
mask = (adata.obs[groupby] == group) & mask0
f = interp1d(x[mask], v[mask])
x_int = xint[(xint >= np.min(x[mask])) & (xint <= np.max(x[mask]))]
v_int = f(x_int)
# Normalize
v_absmax = np.max(np.abs(v_int))
x_segment = (x_int[1] - x_int[0]) / (n_evals/5)
v_int = v_int * x_segment / v_absmax
ax1.quiver(x_int, i * np.ones_like(x_int), v_int, np.zeros_like(v_int),
headwidth=4, color=colors[i], edgecolor='k', linewidth = .5, angles='xy', scale_units='xy', scale=1)
ax1.set_ylim(-1, len(groups))
ax1.set_ylabel(f'Average RNA velocity\nby {groupby}')
ax1.set_yticks([])
# pl.suptitle('Neutrophil cell density on diffusion pseudotime')
if show: pl.show()
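# Hedged usage sketch (added): one diffusion component analysed end to end.
# Assumes sc.tl.diffmap has been run, adata.obs['condition'] is categorical with
# colors stored in adata.uns['condition_colors'], and (for the velocity panel)
# that scVelo velocities were projected into adata.obsm['velocity_diffmap'].
def _example_diffusion_analysis(adata):
    diffusion_analysis_(adata, groupby='condition', component=1,
                        velocity_mode='average', range_percs=[3, 97])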
def identify_barcode_overlap(df1, df2, key1, key2, reg1='[ACGT]+-', reg2='[ACGT]+-', kick=-1, plot=True):
# clear index
x1 = np.array([re.findall(reg1, txt)[0][:kick] for txt in df1.index])
x2 = np.array([re.findall(reg2, txt)[0][:kick] for txt in df2.index])
# count co-occurences of barcodes by key categories
c1 = pd.unique(df1[key1])
c2 = | pd.unique(df2[key2]) | pandas.unique |
# coding: utf8
import torch
import numpy as np
import os
import warnings
import pandas as pd
from time import time
import logging
from torch.nn.modules.loss import _Loss
import torch.nn.functional as F
from sklearn.utils import column_or_1d
import scipy.sparse as sp
from clinicadl.tools.deep_learning.iotools import check_and_clean
from clinicadl.tools.deep_learning import EarlyStopping, save_checkpoint
#####################
# CNN train / test #
#####################
def train(model, train_loader, valid_loader, criterion, optimizer, resume, log_dir, model_dir, options, logger=None):
"""
Function used to train a CNN.
The best model and checkpoint will be found in the 'best_model_dir' of options.output_dir.
Args:
model: (Module) CNN to be trained
train_loader: (DataLoader) wrapper of the training dataset
valid_loader: (DataLoader) wrapper of the validation dataset
criterion: (loss) function to calculate the loss
optimizer: (torch.optim) optimizer linked to model parameters
resume: (bool) if True, a begun job is resumed
log_dir: (str) path to the folder containing the logs
model_dir: (str) path to the folder containing the models weights and biases
options: (Namespace) ensemble of other options given to the main script.
logger: (logging object) writer to stdout and stderr
"""
from tensorboardX import SummaryWriter
from time import time
if logger is None:
logger = logging
columns = ['epoch', 'iteration', 'time',
'balanced_accuracy_train', 'loss_train',
'balanced_accuracy_valid', 'loss_valid']
if hasattr(model, "variational") and model.variational:
columns += ["kl_loss_train", "kl_loss_valid"]
filename = os.path.join(os.path.dirname(log_dir), 'training.tsv')
if not resume:
check_and_clean(model_dir)
check_and_clean(log_dir)
results_df = pd.DataFrame(columns=columns)
with open(filename, 'w') as f:
results_df.to_csv(f, index=False, sep='\t')
options.beginning_epoch = 0
else:
if not os.path.exists(filename):
raise ValueError('The training.tsv file of the resumed experiment does not exist.')
truncated_tsv = pd.read_csv(filename, sep='\t')
truncated_tsv.set_index(['epoch', 'iteration'], inplace=True)
truncated_tsv.drop(options.beginning_epoch, level=0, inplace=True)
truncated_tsv.to_csv(filename, index=True, sep='\t')
# Create writers
writer_train = SummaryWriter(os.path.join(log_dir, 'train'))
writer_valid = SummaryWriter(os.path.join(log_dir, 'validation'))
# Initialize variables
best_valid_accuracy = -1.0
best_valid_loss = np.inf
epoch = options.beginning_epoch
model.train() # set the model to training mode
train_loader.dataset.train()
early_stopping = EarlyStopping('min', min_delta=options.tolerance, patience=options.patience)
mean_loss_valid = None
t_beginning = time()
while epoch < options.epochs and not early_stopping.step(mean_loss_valid):
logger.info("Beginning epoch %i." % epoch)
model.zero_grad()
evaluation_flag = True
step_flag = True
tend = time()
total_time = 0
for i, data in enumerate(train_loader, 0):
t0 = time()
total_time = total_time + t0 - tend
if options.gpu:
imgs, labels = data['image'].cuda(), data['label'].cuda()
else:
imgs, labels = data['image'], data['label']
if hasattr(model, "variational") and model.variational:
z, mu, std, train_output = model(imgs)
kl_loss = kl_divergence(z, mu, std)
loss = criterion(train_output, labels) + kl_loss
else:
train_output = model(imgs)
loss = criterion(train_output, labels)
# Back propagation
loss.backward()
del imgs, labels
if (i + 1) % options.accumulation_steps == 0:
step_flag = False
optimizer.step()
optimizer.zero_grad()
del loss
# Evaluate the model only when no gradients are accumulated
if options.evaluation_steps != 0 and (i + 1) % options.evaluation_steps == 0:
evaluation_flag = False
_, results_train = test(model, train_loader, options.gpu, criterion)
mean_loss_train = results_train["total_loss"] / (len(train_loader) * train_loader.batch_size)
_, results_valid = test(model, valid_loader, options.gpu, criterion)
mean_loss_valid = results_valid["total_loss"] / (len(valid_loader) * valid_loader.batch_size)
model.train()
train_loader.dataset.train()
global_step = i + epoch * len(train_loader)
writer_train.add_scalar('balanced_accuracy', results_train["balanced_accuracy"], global_step)
writer_train.add_scalar('loss', mean_loss_train, global_step)
writer_valid.add_scalar('balanced_accuracy', results_valid["balanced_accuracy"], global_step)
writer_valid.add_scalar('loss', mean_loss_valid, global_step)
logger.info("%s level training accuracy is %f at the end of iteration %d"
% (options.mode, results_train["balanced_accuracy"], i))
logger.info("%s level validation accuracy is %f at the end of iteration %d"
% (options.mode, results_valid["balanced_accuracy"], i))
t_current = time() - t_beginning
row = [epoch, i, t_current,
results_train["balanced_accuracy"], mean_loss_train,
results_valid["balanced_accuracy"], mean_loss_valid]
if hasattr(model, "variational") and model.variational:
row += [results_train["total_kl_loss"] / (len(train_loader) * train_loader.batch_size),
results_valid["total_kl_loss"] / (len(valid_loader) * valid_loader.batch_size)]
row_df = | pd.DataFrame([row], columns=columns) | pandas.DataFrame |
from urllib.request import urlopen
from http.cookiejar import CookieJar
from io import StringIO
from app.extensions import cache
from app.api.constants import PERMIT_HOLDER_CACHE, DORMANT_WELLS_CACHE, LIABILITY_PER_WELL_CACHE, TIMEOUT_15_MINUTES, TIMEOUT_60_MINUTES, TIMEOUT_12_HOURS, TIMEOUT_1_YEAR
from flask import Flask, current_app
from threading import Thread
import requests
import urllib
import pandas as pd
import pyarrow as pa
import time
from .ogc_data_constants import PERMIT_HOLDER_CSV_DATA, DORMANT_WELLS_CSV_DATA, LIABILITY_PER_WELL_CSV_DATA
# TODO: Stick into environment variables
PERMIT_HOLDER_CSV = 'http://reports.bcogc.ca/ogc/f?p=200:201:14073940726161:CSV::::'
DORMANT_WELLS_CSV = 'https://reports.bcogc.ca/ogc/f?p=200:81:9680316354055:CSV::::'
LIABILITY_PER_WELL_CSV = 'https://reports.bcogc.ca/ogc/f?p=200:10:10256707131131:CSV::::'
session = requests.session()
def refreshOGCdata(app, cache_key, csv_url, process):
with app.app_context():
serializer = pa.default_serialization_context()
data = cache.get(cache_key)
expiry_token = cache.get(cache_key + '_EXPIRY_TOKEN')
if not expiry_token:
current_app.logger.debug(f'OGC DATA SERVICE - {cache_key} - Cached data not found.')
# set 15 minute token to mitigate multiple threads requesting data at the same time
cache.set(cache_key + '_EXPIRY_TOKEN', True, timeout=TIMEOUT_15_MINUTES)
else:
current_app.logger.debug(f'OGC DATA SERVICE - {cache_key} - Cached data up to date.')
return
        updated_from_web = False
        try:
            response = session.get(csv_url)
            df = pd.read_table(StringIO(response.text), sep=",")
            df = process(df)
            updated_from_web = True
            current_app.logger.debug(
                f'OGC DATA SERVICE - {cache_key} - Successful get from OGC reporting.')
        except Exception:
# on error, if we don't have data in the cache initialize it from static content
if not data:
current_app.logger.debug(
f'OGC DATA SERVICE - {cache_key} - Falling back to static content.')
                if cache_key == PERMIT_HOLDER_CACHE:
                    df = pd.read_table(StringIO(PERMIT_HOLDER_CSV_DATA), sep=",")
                if cache_key == DORMANT_WELLS_CACHE:
                    df = pd.read_table(StringIO(DORMANT_WELLS_CSV_DATA), sep=",")
                if cache_key == LIABILITY_PER_WELL_CACHE:
                    df = pd.read_table(StringIO(LIABILITY_PER_WELL_CSV_DATA), sep=",")
df = process(df)
row_count = df.shape[0]
# only update cache if there is a good dataset
if row_count > 1:
current_app.logger.debug(f'OGC DATA SERVICE - {cache_key} - Updating cached data.')
cache.set(
cache_key,
serializer.serialize(df).to_buffer().to_pybytes(),
timeout=TIMEOUT_1_YEAR)
if updated_from_web:
cache.set(cache_key + '_EXPIRY_TOKEN', True, timeout=TIMEOUT_60_MINUTES)
else:
current_app.logger.warning(
f'OGC DATA SERVICE - {cache_key} - FAILED TO RETRIEVE UPDATED DATA')
class OGCDataService():
@classmethod
def refreshAllData(cls):
cls.getPermitHoldersDataFrame()
cls.getDormantWellsDataFrame()
cls.getLiabilityPerWellDataFrame()
@classmethod
def getOGCdataframe(cls, cache_key, csv_url, process):
serializer = pa.default_serialization_context()
data = cache.get(cache_key)
app = current_app._get_current_object()
#if empty dataset refresh data synchronously, otherwise refresh in the background and continue
if not data:
df = refreshOGCdata(app, cache_key, csv_url, process)
else:
thread = Thread(
target=refreshOGCdata, args=(
app,
cache_key,
csv_url,
process,
))
thread.daemon = True
thread.start()
#update data and return
data = cache.get(cache_key)
if data:
df = serializer.deserialize(data)
return df
@classmethod
def getPermitHoldersDataFrame(cls):
def process(df):
df.columns = [
'operator_id', 'organization_name', 'phone_num', 'address_line_1', 'address_line_2',
'city', 'province', 'postal_code', 'country'
]
return df
return cls.getOGCdataframe(PERMIT_HOLDER_CACHE, PERMIT_HOLDER_CSV, process)
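    # Hedged usage note (added): consumers typically call
    #     df = OGCDataService.getPermitHoldersDataFrame()
    # The first call on a cold cache fetches and processes the CSV synchronously;
    # subsequent calls return the cached frame and refresh it in a background thread.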
@classmethod
def getDormantWellsDataFrame(cls):
def process(df):
df.columns = [
'operator_name', 'operator_id', 'well_auth_number', 'well_name', 'dormant_status',
'current_status', 'well_dormancy_date', 'site_dormancy_date', 'site_dormancy_type',
'site_dormant_status', 'surface_location', 'field', 'abandonment_date',
'last_spud_date', 'last_rig_rels_date', 'last_completion_date',
'last_active_production_year', 'last_active_inj_display_year',
'wellsite_dormancy_declaration_date', 'multi_well'
]
df['well_dormancy_date'] = pd.to_datetime(
df['well_dormancy_date'],
errors='coerce').apply(lambda x: x.strftime('%Y-%m-%d') if pd.notnull(x) else None)
df['site_dormancy_date'] = pd.to_datetime(
df['site_dormancy_date'],
errors='coerce').apply(lambda x: x.strftime('%Y-%m-%d') if pd.notnull(x) else None)
df['abandonment_date'] = pd.to_datetime(
df['abandonment_date'],
errors='coerce').apply(lambda x: x.strftime('%Y-%m-%d') if pd.notnull(x) else None)
df['last_spud_date'] = pd.to_datetime(
df['last_spud_date'],
errors='coerce').apply(lambda x: x.strftime('%Y-%m-%d') if | pd.notnull(x) | pandas.notnull |
"""
GLM fitting utilities based on NeuroGLM by <NAME>, <NAME>:
https://github.com/pillowlab/neuroGLM
<NAME>
International Brain Lab, 2020
"""
from warnings import warn, catch_warnings
import numpy as np
from numpy.linalg.linalg import LinAlgError
import pandas as pd
from brainbox.processing import bincount2D
from sklearn.linear_model import PoissonRegressor
import scipy.sparse as sp
import numba as nb
from numpy.matlib import repmat
from scipy.optimize import minimize
from scipy.special import xlogy
from tqdm import tqdm
import torch
from brainbox.modeling.poissonGLM import PoissonGLM
class NeuralGLM:
"""
Generalized Linear Model which seeks to describe spiking activity as the output of a poisson
process. Uses sklearn's GLM methods under the hood while providing useful routines for dealing
with neural data
"""
def __init__(self, trialsdf, spk_times, spk_clu, vartypes,
train=0.8, blocktrain=False, binwidth=0.02, mintrials=100, subset=False):
"""
Construct GLM object using information about all trials, and the relevant spike times.
Only ingests data, and further object methods must be called to describe kernels, gain
terms, etc. as components of the model.
Parameters
----------
trialsdf: pandas.DataFrame
DataFrame of trials in which each row contains all desired covariates of the model.
e.g. contrast, stimulus type, etc. Not all columns will necessarily be fit.
If a continuous covariate (e.g. wheel position, pupil diameter) is included, each entry
of the column must be a nSamples x 2 array with samples in the first column and
timestamps (relative to trial start) in the second position.
*Must have \'trial_start\' and \'trial_end\' parameters which are times, in seconds.*
spk_times: numpy.array of floats
1-D array of times at which spiking events were detected, in seconds.
spk_clu: numpy.array of integers
1-D array of same shape as spk_times, with integer cluster IDs identifying which
cluster a spike time belonged to.
vartypes: dict
Dict with column names in trialsdf as keys, values are the type of covariate the column
contains. e.g. {'stimOn_times': 'timing', 'wheel', 'continuous', 'correct': 'value'}
Valid values are:
'timing' : A timestamp relative to trial start (e.g. stimulus onset)
'continuous' : A continuous covariate sampled throughout the trial (e.g. eye pos)
'value' : A single value for the given trial (e.g. contrast or difficulty)
train: float
Float in (0, 1] indicating proportion of data to use for training GLM vs testing
(using the NeuralGLM.score method). Trials to keep will be randomly sampled.
binwidth: float
Width, in seconds, of the bins which will be used to count spikes. Defaults to 20ms.
mintrials: int
Minimum number of trials in which neurons fired a spike in order to be fit. Defaults
to 100 trials.
subset: bool
Whether or not to perform model subsetting, in which the model is built iteratively
from only the mean rate, up. This allows comparison of D^2 scores for sub-models which
incorporate only some parameters, to see which regressors actually improve
explainability. Default to False.
Returns
-------
glm: object
GLM object with methods for adding regressors and fitting
"""
# Data checks #
if not all([name in vartypes for name in trialsdf.columns]):
raise KeyError("Some columns were not described in vartypes")
if not all([value in ('timing', 'continuous', 'value') for value in vartypes.values()]):
raise ValueError("Invalid values were passed in vartypes")
if not len(spk_times) == len(spk_clu):
raise IndexError("Spike times and cluster IDs are not same length")
if not isinstance(train, float) and not train == 1:
raise TypeError('train must be a float between 0 and 1')
if not ((train > 0) & (train <= 1)):
raise ValueError('train must be between 0 and 1')
# Filter out cells which don't meet the criteria for minimum spiking, while doing trial
# assignment
self.vartypes = vartypes
self.vartypes['duration'] = 'value'
trialsdf = trialsdf.copy() # Make sure we don't modify the original dataframe
clu_ids = np.unique(spk_clu)
trbounds = trialsdf[['trial_start', 'trial_end']] # Get the start/end of trials
# Initialize a Cells x Trials bool array to easily see how many trials a clu spiked
trialspiking = np.zeros((trialsdf.index.max() + 1, clu_ids.max() + 1), dtype=bool)
# Empty trial duration value to use later
trialsdf['duration'] = np.nan
# Iterate through each trial, and store the relevant spikes for that trial into a dict
# Along with the cluster labels. This makes binning spikes and accessing spikes easier.
spks = {}
clu = {}
st_endlast = 0
timingvars = [col for col in trialsdf.columns if vartypes[col] == 'timing']
for i, (start, end) in trbounds.iterrows():
if any(np.isnan((start, end))):
warn(f"NaN values found in trial start or end at trial number {i}. "
"Discarding trial.")
trialsdf.drop(i, inplace=True)
continue
st_startind = np.searchsorted(spk_times[st_endlast:], start) + st_endlast
st_endind = np.searchsorted(spk_times[st_endlast:], end, side='right') + st_endlast
st_endlast = st_endind
trial_clu = np.unique(spk_clu[st_startind:st_endind])
trialspiking[i, trial_clu] = True
spks[i] = spk_times[st_startind:st_endind] - start
clu[i] = spk_clu[st_startind:st_endind]
for col in timingvars:
trialsdf.at[i, col] = np.round(trialsdf.at[i, col] - start, decimals=5)
trialsdf.at[i, 'duration'] = end - start
# Break the data into test and train sections for cross-validation
if train == 1:
print('Training fraction set to 1. Training on all data.')
traininds = trialsdf.index
testinds = trialsdf.index
elif blocktrain:
trainlen = int(np.floor(len(trialsdf) * train))
traininds = trialsdf.index[:trainlen]
testinds = trialsdf.index[trainlen:]
else:
trainlen = int(np.floor(len(trialsdf) * train))
traininds = sorted(np.random.choice(trialsdf.index, trainlen, replace=False))
testinds = trialsdf.index[~trialsdf.index.isin(traininds)]
# Set model parameters to begin with
self.spikes = spks
self.clu = clu
self.clu_ids = np.argwhere(np.sum(trialspiking, axis=0) > mintrials)
self.binwidth = binwidth
self.covar = {}
self.trialsdf = trialsdf
self.traininds = traininds
self.testinds = testinds
self.compiled = False
self.subset = subset
if len(self.clu_ids) == 0:
            raise UserWarning('No neuron fired a spike in the minimum number of trials.')
# Bin spikes
self._bin_spike_trains()
return
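        # Hedged usage sketch (added, not original code): typical construction, assuming
        # trialsdf / spk_times / spk_clu were loaded elsewhere:
        #     vartypes = {'trial_start': 'timing', 'trial_end': 'timing',
        #                 'stimOn_times': 'timing', 'contrastLeft': 'value',
        #                 'wheel_velocity': 'continuous'}
        #     nglm = NeuralGLM(trialsdf, spk_times, spk_clu, vartypes,
        #                      train=0.8, binwidth=0.02)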
def _bin_spike_trains(self):
"""
Bins spike times passed to class at instantiation. Will not bin spike trains which did
not meet the criteria for minimum number of spiking trials. Must be run before the
NeuralGLM.fit() method is called.
"""
spkarrs = []
arrdiffs = []
for i in self.trialsdf.index:
duration = self.trialsdf.loc[i, 'duration']
durmod = duration % self.binwidth
if durmod > (self.binwidth / 2):
duration = duration - (self.binwidth / 2)
if len(self.spikes[i]) == 0:
arr = np.zeros((self.binf(duration), len(self.clu_ids)))
spkarrs.append(arr)
continue
spks = self.spikes[i]
clu = self.clu[i]
arr = bincount2D(spks, clu,
xbin=self.binwidth, ybin=self.clu_ids, xlim=[0, duration])[0]
arrdiffs.append(arr.shape[1] - self.binf(duration))
spkarrs.append(arr.T)
y = np.vstack(spkarrs)
if hasattr(self, 'dm'):
assert y.shape[0] == self.dm.shape[0], "Oh shit. Indexing error."
self.binnedspikes = y
return
def add_covariate_timing(self, covlabel, eventname, bases,
offset=0, deltaval=None, cond=None, desc=''):
"""
Convenience wrapper for adding timing event regressors to the GLM. Automatically generates
a one-hot vector for each trial as the regressor and adds the appropriate data structure
to the model.
Parameters
----------
covlabel : str
Label which the covariate will use. Can be accessed via dot syntax of the instance
usually.
eventname : str
Label of the column in trialsdf which has the event timing for each trial.
bases : numpy.array
nTB x nB array, i.e. number of time bins for the bases functions by number of bases.
Each column in the array is used together to describe the response of a unit to that
timing event.
offset : float, seconds
            Offset of bases functions relative to timing event. Negative values will ensure that
            the bases begin before the event time (e.g. for anticipatory responses).
deltaval : None, str, or pandas series, optional
Values of the kronecker delta function peak used to encode the event. If a string, the
column in trialsdf with that label will be used. If a pandas series with indexes
            matching trialsdf, corresponding elements of the series will be the delta function value.
If None (default) height is 1.
cond : None, list, or fun, optional
Condition which to apply this covariate. Can either be a list of trial indices, or a
function which takes in rows of the trialsdf and returns booleans.
desc : str, optional
Additional information about the covariate, if desired. by default ''
"""
if covlabel in self.covar:
raise AttributeError(f'Covariate {covlabel} already exists in model.')
if self.compiled:
warn('Design matrix was already compiled once. Be sure to compile again if adding'
' additional covariates.')
if deltaval is None:
gainmod = False
elif isinstance(deltaval, pd.Series):
gainmod = True
elif isinstance(deltaval, str) and deltaval in self.trialsdf.columns:
gainmod = True
deltaval = self.trialsdf[deltaval]
else:
            raise TypeError(f'deltaval must be None, a column name (str) or a pandas Series. '
                            f'{type(deltaval)} was passed instead.')
if eventname not in self.vartypes:
raise ValueError('Event name specified not found in trialsdf')
elif self.vartypes[eventname] != 'timing':
raise TypeError(f'Column {eventname} in trialsdf is not registered as a timing')
vecsizes = self.trialsdf['duration'].apply(self.binf)
stiminds = self.trialsdf[eventname].apply(self.binf)
stimvecs = []
for i in self.trialsdf.index:
vec = np.zeros(vecsizes[i])
if gainmod:
vec[stiminds[i]] = deltaval[i]
else:
vec[stiminds[i]] = 1
stimvecs.append(vec.reshape(-1, 1))
regressor = pd.Series(stimvecs, index=self.trialsdf.index)
self.add_covariate(covlabel, regressor, bases, offset, cond, desc)
return
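        # Hedged usage sketch (added): encoding stimulus onset with a basis kernel.
        # The basis-generating helper below is an assumed utility, not defined here:
        #     bases = raised_cosine_basis(duration=0.4, n_bases=10, binfun=nglm.binf)  # assumed helper
        #     nglm.add_covariate_timing('stim', 'stimOn_times', bases,
        #                               desc='stimulus onset kernel')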
def add_covariate_boxcar(self, covlabel, boxstart, boxend,
cond=None, height=None, desc=''):
if covlabel in self.covar:
raise AttributeError(f'Covariate {covlabel} already exists in model.')
if self.compiled:
warn('Design matrix was already compiled once. Be sure to compile again if adding'
' additional covariates.')
if boxstart not in self.trialsdf.columns or boxend not in self.trialsdf.columns:
raise KeyError('boxstart or boxend not found in trialsdf columns.')
if self.vartypes[boxstart] != 'timing':
raise TypeError(f'Column {boxstart} in trialsdf is not registered as a timing. '
                            'boxstart and boxend need to refer to timing events in trialsdf.')
if self.vartypes[boxend] != 'timing':
raise TypeError(f'Column {boxend} in trialsdf is not registered as a timing. '
                            'boxstart and boxend need to refer to timing events in trialsdf.')
if isinstance(height, str):
if height in self.trialsdf.columns:
height = self.trialsdf[height]
else:
raise KeyError(f'{height} is str not in columns of trialsdf')
elif isinstance(height, pd.Series):
if not all(height.index == self.trialsdf.index):
raise IndexError('Indices of height series does not match trialsdf.')
elif height is None:
height = pd.Series(np.ones(len(self.trialsdf.index)), index=self.trialsdf.index)
vecsizes = self.trialsdf['duration'].apply(self.binf)
stind = self.trialsdf[boxstart].apply(self.binf)
endind = self.trialsdf[boxend].apply(self.binf)
stimvecs = []
for i in self.trialsdf.index:
bxcar = np.zeros(vecsizes[i])
bxcar[stind[i]:endind[i] + 1] = height[i]
stimvecs.append(bxcar)
regressor = pd.Series(stimvecs, index=self.trialsdf.index)
self.add_covariate(covlabel, regressor, None, cond, desc)
return
def add_covariate_raw(self, covlabel, raw,
cond=None, desc=''):
stimlens = self.trialsdf.duration.apply(self.binf)
if isinstance(raw, str):
if raw not in self.trialsdf.columns:
raise KeyError(f'String {raw} not found in columns of trialsdf. Strings must'
'refer to valid column names.')
covseries = self.trialsdf[raw]
if np.any(covseries.apply(len) != stimlens):
raise IndexError(f'Some array shapes in {raw} do not match binned duration.')
self.add_covariate(covlabel, covseries, None, cond=cond)
if callable(raw):
try:
covseries = self.trialsdf.apply(raw, axis=1)
except Exception:
raise TypeError('Function for raw covariate generation did not run properly.'
'Make sure that the function passed takes in rows of trialsdf.')
if np.any(covseries.apply(len) != stimlens):
raise IndexError(f'Some array shapes in {raw} do not match binned duration.')
self.add_covariate(covlabel, covseries, None, cond=cond)
if isinstance(raw, pd.Series):
if np.any(raw.index != self.trialsdf.index):
raise IndexError('Indices of raw do not match indices of trialsdf.')
if np.any(raw.apply(len) != stimlens):
raise IndexError(f'Some array shapes in {raw} do not match binned duration.')
self.add_covariate(covlabel, raw, None, cond=cond)
def add_covariate(self, covlabel, regressor, bases,
offset=0, cond=None, desc=''):
"""
Parent function to add covariates to model object. Takes a regressor in the form of a
pandas Series object, a T x M array of M bases, and stores them for use in the design
matrix generation.
Parameters
----------
covlabel : str
Label for the covariate being added. Will be exposed, if possible, through
(instance).(covlabel) attribute.
regressor : pandas.Series
Series in which each element is the value(s) of a regressor for a trial at that index.
These will be convolved with the bases functions (if provided) to produce the
components of the design matrix. *Regressor must be (T / dt) x 1 array for each trial*
bases : numpy.array or None
T x M array of M basis functions over T timesteps. Columns will be convolved with the
elements of `regressor` to produce elements of the design matrix. If None, it is
assumed a raw regressor is being used.
offset : int, optional
Offset of the regressor from the bases during convolution. Negative values indicate
            that the bases begin before the event onset (anticipatory responses), by default 0
cond : list or func, optional
Condition for which to apply covariate. Either a list of trials which the covariate
applies to, or a function of the form f(dataframerow) which returns a boolean,
by default None
desc : str, optional
Description of the covariate for reference purposes, by default '' (empty)
"""
if covlabel in self.covar:
raise AttributeError(f'Covariate {covlabel} already exists in model.')
if self.compiled:
warn('Design matrix was already compiled once. Be sure to compile again if adding'
' additional covariates.')
# Test for mismatch in length of regressor vs trials
mismatch = np.zeros(len(self.trialsdf.index), dtype=bool)
for i in self.trialsdf.index:
currtr = self.trialsdf.loc[i]
nT = self.binf(currtr.duration)
if regressor.loc[i].shape[0] != nT:
mismatch[i] = True
if np.any(mismatch):
raise ValueError('Length mismatch between regressor and trial on trials'
f'{np.argwhere(mismatch)}.')
# Initialize containers for the covariate dicts
if not hasattr(self, 'currcol'):
self.currcol = 0
if callable(cond):
cond = self.trialsdf.index[self.trialsdf.apply(cond, axis=1)].to_numpy()
if not all(regressor.index == self.trialsdf.index):
raise IndexError('Indices of regressor and trials dataframes do not match.')
cov = {'description': desc,
'bases': bases,
'valid_trials': cond if cond is not None else self.trialsdf.index,
'offset': offset,
'regressor': regressor,
'dmcol_idx': np.arange(self.currcol, self.currcol + bases.shape[1])
if bases is not None else self.currcol}
if bases is None:
self.currcol += 1
else:
self.currcol += bases.shape[1]
self.covar[covlabel] = cov
return
def compile_design_matrix(self, dense=True):
"""
Compiles design matrix for the current experiment based on the covariates which were added
with the various NeuralGLM.add_covariate methods available. Can optionally compile a sparse
design matrix using the scipy.sparse package, however that method may take longer depending
on the degree of sparseness.
Parameters
----------
dense : bool, optional
Whether or not to compute a dense design matrix or a sparse one, by default True
"""
covars = self.covar
# Go trial by trial and compose smaller design matrices
miniDMs = []
rowtrials = []
for i, trial in self.trialsdf.iterrows():
nT = self.binf(trial.duration)
miniX = np.zeros((nT, self.currcol))
rowlabs = np.ones((nT, 1), dtype=int) * i
for cov in covars.values():
sidx = cov['dmcol_idx']
# Optionally use cond to filter out which trials to apply certain regressors,
if i not in cov['valid_trials']:
continue
stim = cov['regressor'][i]
# Convolve Kernel or basis function with stimulus or regressor
if cov['bases'] is None:
miniX[:, sidx] = stim
else:
if len(stim.shape) == 1:
stim = stim.reshape(-1, 1)
miniX[:, sidx] = convbasis(stim, cov['bases'], self.binf(cov['offset']))
# Sparsify convolved result and store in miniDMs
if dense:
miniDMs.append(miniX)
else:
miniDMs.append(sp.lil_matrix(miniX))
rowtrials.append(rowlabs)
if dense:
dm = np.vstack(miniDMs)
else:
dm = sp.vstack(miniDMs).to_csc()
trlabels = np.vstack(rowtrials)
if hasattr(self, 'binnedspikes'):
assert self.binnedspikes.shape[0] == dm.shape[0], "Oh shit. Indexing error."
self.dm = dm
self.trlabels = trlabels
# self.dm = np.roll(dm, -1, axis=0) # Fix weird +1 offset bug in design matrix
self.compiled = True
return
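        # Hedged workflow note (added): once all covariates are registered, the usual
        # sequence is to compile the design matrix and then fit, e.g.
        #     nglm.compile_design_matrix()
        #     coefs, intercepts, variances = nglm._fit_sklearn(nglm.dm, nglm.binnedspikes, alpha=0)
        # (a public fit() wrapper, if present elsewhere in this class, would normally
        # dispatch to _fit_sklearn / _fit_pytorch).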
def _fit_sklearn(self, dm, binned, alpha, cells=None, retvar=False, noncovwarn=True):
"""
Fit a GLM using scikit-learn implementation of PoissonRegressor. Uses a regularization
strength parameter alpha, which is the strength of ridge regularization term. When alpha
is set to 0, this *should* in theory be the same as _fit_minimize, but in practice it is
not and seems to exhibit some regularization still.
Parameters
----------
dm : numpy.ndarray
Design matrix, in which rows are observations and columns are regressor values. Should
NOT contain a bias column for the intercept. Scikit-learn handles that.
binned : numpy.ndarray
Vector of observed spike counts which we seek to predict. Must be of the same length
as dm.shape[0]
alpha : float
Regularization strength, applied as multiplicative constant on ridge regularization.
cells : list
List of cells which should be fit. If None is passed, will default to fitting all cells
in clu_ids
        retvar : bool
Whether or not to return variances on parameters in dm.
"""
if cells is None:
cells = self.clu_ids.flatten()
coefs = pd.Series(index=cells, name='coefficients', dtype=object)
intercepts = pd.Series(index=cells, name='intercepts')
variances = pd.Series(index=cells, name='variances', dtype=object)
nonconverged = []
for cell in tqdm(cells, 'Fitting units:', leave=False):
cell_idx = np.argwhere(self.clu_ids == cell)[0, 0]
cellbinned = binned[:, cell_idx]
with catch_warnings(record=True) as w:
fitobj = PoissonRegressor(alpha=alpha, max_iter=300).fit(dm,
cellbinned)
if len(w) != 0:
nonconverged.append(cell)
wts = np.concatenate([[fitobj.intercept_], fitobj.coef_], axis=0)
biasdm = np.pad(dm.copy(), ((0, 0), (1, 0)), 'constant', constant_values=1)
if retvar:
wvar = np.diag(np.linalg.inv(dd_neglog(wts, biasdm, cellbinned)))
else:
wvar = np.ones((wts.shape[0], wts.shape[0])) * np.nan
coefs.at[cell] = fitobj.coef_
variances.at[cell] = wvar[1:]
intercepts.at[cell] = fitobj.intercept_
if noncovwarn:
if len(nonconverged) != 0:
warn(f'Fitting did not converge for some units: {nonconverged}')
return coefs, intercepts, variances
def _fit_pytorch(self, dm, binned, cells=None, retvar=False, epochs=500, optim='adam',
lr=1.0):
"""
Fit the GLM using PyTorch on GPU(s). Regularization has not been applied yet.
Parameters
----------
dm : numpy.ndarray
Design matrix, in which rows are observations and columns are regressor values. First
column must be a bias column of ones.
binned : numpy.ndarray
Vector of observed spike counts which we seek to predict. Must be of the same length
as dm.shape[0]
cells : list
List of cells which should be fit. If None is passed, will default to fitting all cells
in clu_ids
        retvar : bool
Whether or not to return variances on parameters in dm.
epochs : int
The number of epochs to train the model
optim : string
The name of optimization method in pytorch
lr : float
Learning rate for the optimizer
"""
if cells is None:
cells = self.clu_ids.flatten()
coefs = pd.Series(index=cells, name='coefficients', dtype=object)
intercepts = | pd.Series(index=cells, name='intercepts') | pandas.Series |
""" Run relational network evaluation. """
import collections
import itertools
import json
from logging import Logger
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import pandas as pd
from typing import Dict, List, Tuple
from typing_extensions import Literal
import rdkit
from rdkit import Chem
from rdkit.Chem import rdmolops
import rdkit.Chem.Draw
from rdkit.Chem.Draw import rdMolDraw2D
import seaborn as sns
from sklearn.model_selection import train_test_split
# noinspection PyPackageRequirements
from tap import Tap
import torch
from tqdm import tqdm
from conformation.batch import Batch
from conformation.dataloader import DataLoader
from conformation.dataset import GraphDataset
from conformation.relational_utils import load_relational_checkpoint
from conformation.run_relational_training import Args as RelationalTrainArgs
from conformation.utils import param_count
class Args(Tap):
"""
System arguments.
"""
data_path: str # Path to metadata file
uid_path: str # Path to uid dictionary
binary_dict_path: str # Path to uid-binary dictionary
checkpoint_path: str # Directory of checkpoint to load saved model
save_dir: str # Directory for logger
batch_size: int = 10 # Batch size
dataset: Literal["train", "val", "test"] = "test" # Which dataset to evaluate
cuda: bool = False # Cuda availability
distance_analysis: bool = False # Whether or not to print selective bond/edge information
distance_analysis_lo: float = 1.0 # Lower distance analysis bound
distance_analysis_hi: float = 2.0 # Upper distance analysis bound
error_threshold: float = 0.05 # Decimal percentage corresponding to lower/upper error cutoffs for plotting
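# Hedged CLI sketch (added): with Tap, the fields above become command-line flags, e.g.
#     python run_relational_evaluation.py --data_path metadata.json --uid_path uid_dict.p \
#         --binary_dict_path uid_binaries.p --checkpoint_path model.pt --save_dir eval_out \
#         --dataset test --cuda
# The script and file names are illustrative assumptions.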
def simple_plot(array_x: np.ndarray, array_y: np.ndarray, x_label: str, y_label: str, save_path: str, size: float = 0.5,
x_lim: Tuple = None, y_lim: Tuple = None, color: str = "b") -> None:
"""
Plot one array against another.
:param array_x: Data measured on the x-axis.
:param array_y: Data measured on the y-axis.
:param x_label: x-axis label.
:param y_label: y-axis label.
:param save_path: Name to save figure as.
:param size: Marker size.
:param x_lim: x-axis limits.
:param y_lim: y-axis limits.
:param color: Color.
:return: None.
"""
sns.set_style("dark")
fig = sns.jointplot(array_x, array_y, xlim=x_lim, ylim=y_lim, s=size, color=color).set_axis_labels(x_label, y_label)
fig.savefig(save_path)
plt.close()
def double_axis_plot(array_x: np.ndarray, array_y_1: np.ndarray, array_y_2: np.ndarray, title: str, x_label: str,
y_label_1: str, y_label_2: str, save_path: str, color_1: str = 'tab:red',
color_2: str = 'tab:blue', style: str = 'o', size: float = 0.5, x_lim: Tuple = None,
y_lim_1: Tuple = None, y_lim_2: Tuple = None) -> None:
"""
Plot one array against two others using two separate y-axes.
:param array_x: Data measured on the x-axis.
:param array_y_1: Data measured on the left y-axis.
:param array_y_2: Data measured on the right y-axis.
:param title: Plot title.
:param x_label: x-axis label.
:param y_label_1: Left y-axis label.
:param y_label_2: Right y-axis label.
:param save_path: Name to save figure as.
:param color_1: Color of left axis info.
:param color_2: Color of right axis info.
:param style: Plot style.
:param size: Marker size.
:param x_lim: x-axis limits.
:param y_lim_1: Left y-axis limits.
:param y_lim_2: Right y-axis limits.
:return: None.
"""
sns.set()
fig, ax1 = plt.subplots()
color = color_1
ax1.set_xlabel(x_label)
ax1.set_ylabel(y_label_1, color=color)
ax1.plot(array_x, array_y_1, style, color=color, markersize=size)
ax1.tick_params(axis='y', labelcolor=color)
ax1.set_ylim(y_lim_1)
ax1.set_xlim(x_lim)
ax2 = ax1.twinx()
color = color_2
ax2.set_ylabel(y_label_2, color=color)
ax2.plot(array_x, array_y_2, style, color=color, markersize=size)
ax2.tick_params(axis='y', labelcolor=color)
ax2.set_ylim(y_lim_2)
fig.tight_layout()
plt.title(title)
plt.savefig(save_path, bbox_inches='tight')
fig.clf()
plt.close(fig)
def tensor_to_list(tensor: torch.Tensor, destination_list: List) -> None:
"""
Transfer data from 1D tensor to list.
:param tensor: Tensor containing data.
:param destination_list: List.
:return: None
"""
for i in range(tensor.shape[0]):
destination_list.append(tensor[i])
def path_and_bond_extraction(batch: Batch, uid_dict: Dict, binary_dict: Dict, shortest_paths: List, bond_types: List,
triplet_types: List, carbon_carbon_ring_types: List, carbon_carbon_non_ring_types,
carbon_carbon_chain_types: List, carbon_carbon_no_chain_types: List,
carbon_carbon_aromatic_types: List, carbon_carbon_non_aromatic_types: List,
carbon_carbon_quadruplet_types: List, carbon_carbon_non_ring_molecules: List) -> None:
"""
Compute shortest path, bond type information for each edge and add to relevant lists.
:param batch: Data batch.
:param uid_dict: uid-smiles dictionary.
:param binary_dict: uid-binary dictionary.
:param shortest_paths: List containing shortest path lengths.
    :param bond_types: List containing bond types (pairs of atoms, not actual molecular bond types) for edges.
:param triplet_types: List containing bond types for "bonds" with shortest path length 2.
:param carbon_carbon_ring_types: List containing bond types for CC "bonds" in a ring.
:param carbon_carbon_non_ring_types: List containing bond types for CC "bonds" not in a ring.
:param carbon_carbon_chain_types: List containing bond types for CC "bonds" in a linear chain.
:param carbon_carbon_no_chain_types: List containing bond types for CC "bonds" not in a linear chain.
:param carbon_carbon_aromatic_types: List containing bond types for CC "bonds" in aromatic rings.
:param carbon_carbon_non_aromatic_types: List containing bond types for CC "bonds" in non-aromatic rings.
:param carbon_carbon_quadruplet_types: List containing bond types for "bonds" with shortest path length 3.
:param carbon_carbon_non_ring_molecules: List of molecules corresponding to CC no ring "bonds".
:return: None.
"""
for i in range(batch.uid.shape[0]):
# noinspection PyUnresolvedReferences
uid = batch.uid[i].item()
smiles = uid_dict[uid]
# noinspection PyUnresolvedReferences
mol = Chem.Mol(open(binary_dict[uid], "rb").read())
# Acquire the SMILES string with Hs removed corresponding to this molecule. Then, create a new molecule
# from this string, add Hs, and then use this molecule for drawing. Drawing in this way will generate
# an automated 2D conformation that has nothing to do with the conformation of the original molecule, but that
# is easy for viewing. In order to visualize a 2D representation of the actual molecule from the binary,
# Use the molecule from the binary file directly.
mol = Chem.AddHs(Chem.MolFromSmiles(Chem.MolToSmiles(Chem.RemoveHs(mol))))
for m, n in itertools.combinations(list(np.arange(mol.GetNumAtoms())), 2):
shortest_paths.append(len(rdmolops.GetShortestPath(mol, int(m), int(n))) - 1)
atom_a = str(mol.GetAtoms()[int(m)].GetSymbol())
atom_b = str(mol.GetAtoms()[int(n)].GetSymbol())
path_len = len(rdmolops.GetShortestPath(mol, int(m), int(n))) - 1
key = ''.join(sorted([atom_a, atom_b])) + str(path_len)
bond_types.append(key)
if path_len == 2:
atom_intermediate = mol.GetAtoms()[rdmolops.GetShortestPath(mol, int(m),
int(n))[1]].GetSymbol()
key = sorted([atom_a, atom_b])[0] + atom_intermediate + sorted([atom_a, atom_b])[1]
triplet_types.append(key)
else:
triplet_types.append(None)
# Determine if two Carbons are part of the same ring, and if so, what is the ring size
if atom_a == atom_b == "C":
ring_info = list(mol.GetRingInfo().AtomRings())
atom_a_idx = mol.GetAtoms()[int(m)].GetIdx()
atom_b_idx = mol.GetAtoms()[int(n)].GetIdx()
membership = [atom_a_idx in r and atom_b_idx in r for r in ring_info]
if sum(membership) > 0:
key = ''.join(sorted([atom_a, atom_b])) + str(path_len) + "-ring"
carbon_carbon_ring_types.append(key)
carbon_carbon_non_ring_types.append(None)
carbon_carbon_non_ring_molecules.append(None)
if mol.GetAtoms()[int(m)].GetIsAromatic() and mol.GetAtoms()[int(n)].GetIsAromatic():
key = ''.join(sorted([atom_a, atom_b])) + str(path_len) + "-aromatic"
carbon_carbon_aromatic_types.append(key)
carbon_carbon_non_aromatic_types.append(None)
else:
key = ''.join(sorted([atom_a, atom_b])) + str(path_len) + "-non-aromatic"
carbon_carbon_aromatic_types.append(None)
carbon_carbon_non_aromatic_types.append(key)
else:
key = ''.join(sorted([atom_a, atom_b])) + str(path_len) + "-non-ring"
carbon_carbon_ring_types.append(None)
carbon_carbon_non_ring_types.append(key)
carbon_carbon_non_ring_molecules.append([mol, m, n, uid, smiles])
carbon_carbon_aromatic_types.append(None)
carbon_carbon_non_aromatic_types.append(None)
path = rdmolops.GetShortestPath(mol, int(m), int(n))
all_carbon = True
for j in range(len(path)):
atom_type = mol.GetAtoms()[path[j]].GetSymbol()
if atom_type != "C":
all_carbon = False
if all_carbon:
key = ''.join(sorted([atom_a, atom_b])) + str(path_len) + "-chain"
carbon_carbon_chain_types.append(key)
carbon_carbon_no_chain_types.append(None)
else:
key = ''.join(sorted([atom_a, atom_b])) + str(path_len) + "-non-chain"
carbon_carbon_chain_types.append(None)
carbon_carbon_no_chain_types.append(key)
if path_len == 3:
atom_intermediate_1 = mol.GetAtoms()[rdmolops.GetShortestPath(mol, int(m),
int(n))[1]].GetSymbol()
atom_intermediate_2 = mol.GetAtoms()[rdmolops.GetShortestPath(mol, int(m),
int(n))[2]].GetSymbol()
intermediates = sorted([atom_intermediate_1, atom_intermediate_2])
key = atom_a + intermediates[0] + intermediates[1] + atom_b
carbon_carbon_quadruplet_types.append(key)
else:
carbon_carbon_quadruplet_types.append(None)
else:
carbon_carbon_ring_types.append(None)
carbon_carbon_non_ring_types.append(None)
carbon_carbon_non_ring_molecules.append(None)
carbon_carbon_chain_types.append(None)
carbon_carbon_no_chain_types.append(None)
carbon_carbon_aromatic_types.append(None)
carbon_carbon_non_aromatic_types.append(None)
carbon_carbon_quadruplet_types.append(None)
def create_bond_dictionary(types: List, dictionary: Dict, *args: np.ndarray) -> None:
"""
Create a dictionary from a list of keys and values.
:param types: List of keys.
:param dictionary: Dictionary object.
:param args: Lists of values.
:return: None
"""
# Fill dictionary
for i in range(len(types)):
if types[i] is not None:
values = []
for val in args:
values += [val[i]]
if types[i] in dictionary:
dictionary[types[i]].append(values)
else:
dictionary[types[i]] = [values]
# Turn value lists into numpy arrays
for key in dictionary:
dictionary[key] = np.array(dictionary[key])
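# Hedged usage sketch (added): group per-edge errors by bond type so that the mean
# error per type can be compared. The `errors` and `true_distances` arrays stand in
# for the per-edge arrays accumulated during evaluation.
def _example_create_bond_dictionary(bond_types, errors, true_distances):
    bond_dict = dict()
    create_bond_dictionary(bond_types, bond_dict, errors, true_distances)
    for key, values in bond_dict.items():
        print(key, values[:, 0].mean())  # mean error for this bond type
    return bond_dict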
def pair_bar_plots(list_1: List, list_2: List, save_name_1: str, save_name_2: str, args: Args,
y_lim: Tuple[float, float] = None) -> None:
"""
Plot pair of bar plots.
:param list_1: One list of data.
:param list_2: Another list of data.
:param save_name_1: Plot save name 1.
:param save_name_2: Plot save name 2.
:param y_lim: Shared y_lim values.
:param args: System arguments.
:return: None
"""
counter = collections.Counter(list_1)
counter_keys_1 = []
counter_values_1 = []
for key, value in counter.items():
counter_keys_1.append(key)
counter_values_1.append(value / len(list_1))
counter = collections.Counter(list_2)
counter_keys_2 = []
counter_values_2 = []
for key, value in counter.items():
counter_keys_2.append(key)
counter_values_2.append(value / len(list_2))
total_keys = set(counter_keys_1 + counter_keys_2)
for i in total_keys:
if i not in counter_keys_1:
counter_keys_1.append(i)
counter_values_1.append(0.0)
if i not in counter_keys_2:
counter_keys_2.append(i)
counter_values_2.append(0.0)
counter_keys_sorted = list(np.array(counter_keys_1)[np.argsort(counter_keys_1)])
counter_values_sorted = list(np.array(counter_values_1)[np.argsort(counter_keys_1)])
df = pd.DataFrame({"groups": counter_keys_sorted, "percent": counter_values_sorted})
ax = sns.barplot(x="groups", y="percent", data=df)
if y_lim is not None:
ax.set_ylim(y_lim)
ax.figure.savefig(os.path.join(args.save_dir, save_name_1))
plt.close()
counter_keys_sorted = list(np.array(counter_keys_2)[np.argsort(counter_keys_2)])
counter_values_sorted = list(np.array(counter_values_2)[np.argsort(counter_keys_2)])
df = | pd.DataFrame({"groups": counter_keys_sorted, "percent": counter_values_sorted}) | pandas.DataFrame |
import pandas as pd
def write_excel(excel_path:str, plans_dict):
with pd.ExcelWriter(excel_path) as writer:
target_name_list = list(plans_dict.keys())
for target_name in target_name_list:
target_plans = plans_dict[target_name]["plans"]
target_count = plans_dict[target_name]["count"]
head_df = | pd.DataFrame({"目标产物": [target_name], "产量/s": [target_count]}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv('diabetes.csv')
df
df.shape
df.info()
df.describe()
print(df['Outcome'].value_counts())
o = df['Outcome'].value_counts().plot(kind="bar")
# 0 Non-Diabetic
# 1 Diabetic
#Correlation between all the features before cleaning
plt.figure(figsize=(12,10))
# seaborn has an easy method to showcase heatmap
p = sns.heatmap(df.corr(), annot=True,cmap ='RdYlGn')
df.groupby('Outcome').mean()
df.isnull().sum()
df = df.rename(columns={'DiabetesPedigreeFunction': 'DPF'})
df_copy = df.copy(deep=True)
df_copy[['Glucose','BloodPressure','SkinThickness','Insulin','BMI']] = df_copy[['Glucose','BloodPressure','SkinThickness','Insulin','BMI']].replace(0,np.NaN)
# Showing the Count of NANs
print(df_copy.isnull().sum())
df_copy['Glucose'].fillna(df_copy['Glucose'].mean(), inplace = True)
df_copy['BloodPressure'].fillna(df_copy['BloodPressure'].mean(), inplace = True)
df_copy['SkinThickness'].fillna(df_copy['SkinThickness'].mean(), inplace = True)
df_copy['Insulin'].fillna(df_copy['Insulin'].median(), inplace = True)
df_copy['BMI'].fillna(df_copy['BMI'].median(), inplace = True)
# separating the data and labels
X = df.drop(columns = 'Outcome', axis=1)
Y = df['Outcome']
print(X.head())
print(Y.head())
from sklearn.model_selection import train_test_split
X = df_copy.drop(columns='Outcome')  # use the cleaned copy with imputed values
y = df_copy['Outcome']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.35)
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators=205)
classifier.fit(X_train, y_train)
#Getting the accuracy score for Random Forest
from sklearn.metrics import accuracy_score
# accuracy score on the training data
X_train_prediction = classifier.predict(X_train)
training_data_accuracy = accuracy_score(X_train_prediction, y_train)
print('Accuracy score of the training data : ', training_data_accuracy)
# accuracy score on the test data
X_test_prediction = classifier.predict(X_test)
test_data_accuracy = accuracy_score(X_test_prediction, y_test)
print('Accuracy score of the test data : ', test_data_accuracy)
classifier.feature_importances_
( | pd.Series(classifier.feature_importances_, index=X.columns) | pandas.Series |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
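# e.g. for a 2-D frame with axis=1 and key=[0, 1], this yields
# (slice(None), [0, 1]), i.e. "all rows, columns 0 and 1"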
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, then ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# exceeding the slice bounds is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# these now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
# coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
# neg indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# dup indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue( | is_integer_dtype(left['baz']) | pandas.types.common.is_integer_dtype |
'''
Note: Prices are given for all properties. However, ages are available only for individuals, not for companies.
Rows that include companies (they have no age value) are automatically ignored in all of the computed measures.
'''
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sidetable
from IPython.display import display
import tkinter as tk
from pandastable import Table, TableModel
'''Load the data'''
df = pd.read_csv('./semana_3/sabado/bienes_raices.csv')
'''
Clean the data containing blank spaces and remove every
null value
'''
df = df.replace(' ',pd.NA)
df = df.dropna(subset=['edad_compra'])
'''
Convert the data type from object to numeric
'''
df['edad_compra'] = | pd.to_numeric(df['edad_compra']) | pandas.to_numeric |
from datetime import datetime
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
# Get Data
confirmed = | pd.read_csv(
'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv') | pandas.read_csv |
#!/usr/bin/python
# coding=utf-8
import numpy as np
from scipy.interpolate import Rbf
import matplotlib.pyplot as plt
import sys
from math import pi, sqrt, degrees, acos
import os
import pandas as pd
import warnings
from scipy.constants import physical_constants
from shapely.geometry import LineString, Point
m_p = physical_constants['proton mass'][0]
def isnamedtupleinstance(x):
"""
:param x:
:return:
"""
t = type(x)
b = t.__bases__
if len(b) != 1 or b[0] != tuple:
return False
f = getattr(t, '_fields', None)
if not isinstance(f, tuple):
return False
return all(type(n) == str for n in f)
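# Illustrative check: with Point = collections.namedtuple('Point', 'x y'),
# isnamedtupleinstance(Point(1, 2)) returns True, while a plain tuple such as
# (1, 2) returns False.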
def iterate_namedtuple(object, df):
if isnamedtupleinstance(object):
for key, item in object._asdict().items():
if isnamedtupleinstance(item):
iterate_namedtuple(item, df)
else:
df[key] = pd.Series(item.flatten(), name=key)
else:
pass
return df
# isclose is included in Python 3.5+, so this backport can be deleted if the code is ever ported to Python 3.5+
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
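# e.g. isclose(0.1 + 0.2, 0.3) is True even though 0.1 + 0.2 == 0.3 is False
# because of floating-point rounding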
def print_progress(iteration, total, prefix='', suffix='', decimals=0, bar_length=50):
"""creates a progress bar
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * ((iteration + 1) / float(total)))
filled_length = int(round(bar_length * (iteration + 1) / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))
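# Illustrative usage (the iterable `items` and `process` are hypothetical):
# for i, item in enumerate(items):
#     process(item)
#     print_progress(i, len(items), prefix='Progress:', suffix='complete')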
def calc_cell_pts(neut):
sys.setrecursionlimit(100000)
def loop(neut, oldcell, curcell, cellscomplete, xcoords, ycoords):
beta[curcell, :neut.nSides[curcell]] = np.cumsum(
np.roll(neut.angles[curcell, :neut.nSides[curcell]], 1) - 180) + 180
# if first cell:
if oldcell == 0 and curcell == 0:
# rotate cell by theta0 value (specified)
beta[curcell, :neut.nSides[curcell]] = beta[curcell, :neut.nSides[curcell]] + neut.cell1_theta0
x_comp = np.cos(np.radians(beta[curcell, :neut.nSides[curcell]])) * neut.lsides[curcell,
:neut.nSides[curcell]]
y_comp = np.sin(np.radians(beta[curcell, :neut.nSides[curcell]])) * neut.lsides[curcell,
:neut.nSides[curcell]]
xcoords[curcell, :neut.nSides[curcell]] = np.roll(np.cumsum(x_comp), 1) + neut.cell1_ctr_x
ycoords[curcell, :neut.nSides[curcell]] = np.roll(np.cumsum(y_comp), 1) + neut.cell1_ctr_y
# for all other cells:
else:
# adjust all values in beta for current cell such that the side shared
# with oldcell has the same beta as the oldcell side
oldcell_beta = beta[oldcell, :][np.where(neut.adjCell[oldcell, :] == curcell)][0]
delta_beta = beta[curcell, np.where(neut.adjCell[curcell, :] == oldcell)] + 180 - oldcell_beta
beta[curcell, :neut.nSides[curcell]] = beta[curcell, :neut.nSides[curcell]] - delta_beta
# calculate non-shifted x- and y- coordinates
x_comp = np.cos(np.radians(beta[curcell, :neut.nSides[curcell]])) * neut.lsides[curcell,
:neut.nSides[curcell]]
y_comp = np.sin(np.radians(beta[curcell, :neut.nSides[curcell]])) * neut.lsides[curcell,
:neut.nSides[curcell]]
xcoords[curcell, :neut.nSides[curcell]] = np.roll(np.cumsum(x_comp),
1) # xcoords[oldcell,np.where(neut.adjCell[oldcell,:]==curcell)[0][0]]
ycoords[curcell, :neut.nSides[curcell]] = np.roll(np.cumsum(y_comp),
1) # ycoords[oldcell,np.where(neut.adjCell[oldcell,:]==curcell)[0][0]]
cur_in_old = np.where(neut.adjCell[oldcell, :] == curcell)[0][0]
old_in_cur = np.where(neut.adjCell[curcell, :] == oldcell)[0][0]
mdpt_old_x = (xcoords[oldcell, cur_in_old] + np.roll(xcoords[oldcell, :], -1)[cur_in_old]) / 2
mdpt_old_y = (ycoords[oldcell, cur_in_old] + np.roll(ycoords[oldcell, :], -1)[cur_in_old]) / 2
mdpt_cur_x = (xcoords[curcell, old_in_cur] + np.roll(xcoords[curcell, :], -1)[old_in_cur]) / 2
mdpt_cur_y = (ycoords[curcell, old_in_cur] + np.roll(ycoords[curcell, :], -1)[old_in_cur]) / 2
xshift = mdpt_old_x - mdpt_cur_x
yshift = mdpt_old_y - mdpt_cur_y
xcoords[curcell, :] = xcoords[curcell,
:] + xshift # xcoords[oldcell,np.where(neut.adjCell[oldcell,:]==curcell)[0][0]]
ycoords[curcell, :] = ycoords[curcell,
:] + yshift # ycoords[oldcell,np.where(neut.adjCell[oldcell,:]==curcell)[0][0]]
# continue looping through adjacent cells
for j, newcell in enumerate(neut.adjCell[curcell, :neut.nSides[curcell]]):
# if the cell under consideration is a normal cell (>= 3 sides) and not yet complete, move into that cell and continue
if neut.nSides[newcell] >= 3 and cellscomplete[newcell] == 0:
cellscomplete[newcell] = 1
loop(neut, curcell, newcell, cellscomplete, xcoords, ycoords)
return xcoords, ycoords
xcoords = np.zeros(neut.adjCell.shape)
ycoords = np.zeros(neut.adjCell.shape)
beta = np.zeros(neut.adjCell.shape) # beta is the angle of each side with respect to the +x axis.
## Add initial cell to the list of cells that are complete
cellscomplete = np.zeros(neut.nCells)
cellscomplete[0] = 1
xs, ys = loop(neut, 0, 0, cellscomplete, xcoords, ycoords)
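# xs and ys hold the reconstructed vertex coordinates, one row per cell and
# one column per cell side (same shape as neut.adjCell)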
return xs, ys
class NeutpyTools:
def __init__(self, neut=None):
# get vertices in R, Z geometry
self.xs, self.ys = self.calc_cell_pts(neut)
# localize densities, ionization rates, and a few other parameters that might be needed.
self.n_n_slow = neut.nn.s
self.n_n_thermal = neut.nn.t
self.n_n_total = neut.nn.tot
self.izn_rate_slow = neut.izn_rate.s
self.izn_rate_thermal = neut.izn_rate.t
self.izn_rate_total = neut.izn_rate.tot
self.flux_in_s = neut.flux.inc.s
self.flux_in_t = neut.flux.inc.t
self.flux_in_tot = self.flux_in_s + self.flux_in_t
self.flux_out_s = neut.flux.out.s
self.flux_out_t = neut.flux.out.t
self.flux_out_tot = self.flux_out_s + self.flux_out_t
self.create_flux_outfile()
self.create_cell_outfile()
flux_s_xcomp, flux_s_ycomp, flux_s_mag = self.calc_flow('slow', norm=True)
flux_t_xcomp, flux_t_ycomp, flux_t_mag = self.calc_flow('thermal', norm=True)
flux_tot_xcomp, flux_tot_ycomp, flux_tot_mag = self.calc_flow('total', norm=True)
self.vars = {}
self.vars['n_n_slow'] = neut.nn.s
self.vars['n_n_thermal'] = neut.nn.t
self.vars['n_n_total'] = neut.nn.tot
self.vars['flux_s_xcomp'] = flux_s_xcomp
self.vars['flux_s_ycomp'] = flux_s_ycomp
self.vars['flux_s_mag'] = flux_s_mag
self.vars['flux_t_xcomp'] = flux_t_xcomp
self.vars['flux_t_ycomp'] = flux_t_ycomp
self.vars['flux_t_mag'] = flux_t_mag
self.vars['flux_tot_xcomp'] = flux_tot_xcomp
self.vars['flux_tot_ycomp'] = flux_tot_ycomp
self.vars['flux_tot_mag'] = flux_tot_mag
print('attempting to start plot_cell_vals')
self.plot_cell_vals()
def create_cell_outfile(self):
df = pd.DataFrame()
df['R'] = pd.Series(np.mean(self.xs, axis=1), name='R')
df['Z'] = pd.Series(np.mean(self.ys, axis=1), name='Z')
df['n_n_slow'] = pd.Series(self.n_n_slow, name='n_n_slow')
df['n_n_thermal'] = pd.Series(self.n_n_thermal, name='n_n_thermal')
df['n_n_total'] = | pd.Series(self.n_n_total, name='n_n_total') | pandas.Series |
from pysam import VariantFile, AlignmentFile
import pandas as pd
import os
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import networkx as nx
from itertools import combinations
from sklearn.cluster import AgglomerativeClustering
from scipy.cluster.hierarchy import dendrogram
import skbio
from tqdm import tqdm
from covid_bronx.quality import fasta_files, sam_files, variant_files
from plot_helper import Plotter
coverage_levels = pd.read_csv("data/processed/sequencing/coverage.csv", index_col=0)['0']
passed = coverage_levels[coverage_levels>=.95].index
reinfection_samples = {
"data/final/reinfection/output/sample_barcode01" : "AECOM-123",
"data/final/reinfection/output/sample_barcode02" : "AECOM-124",
"data/final/reinfection/output/sample_barcode03" : "AECOM-125",
"data/final/reinfection/output/sample_barcode04" : "AECOM-126",
"data/final/reinfection/output/sample_barcode05" : "AECOM-127",
"data/final/reinfection/output/sample_barcode06" : "AECOM-128",
"data/final/reinfection/output/sample_barcode07" : "AECOM-129",
"data/final/reinfection/output/sample_barcode08" : "AECOM-130",
"data/final/reinfection/output/sample_barcode09" : "AECOM-131",
"data/final/reinfection2b/output/sample_barcode01" : "AECOM-103",
"data/final/reinfection2b/output/sample_barcode02" : "AECOM-104",
"data/final/reinfection2b/output/sample_barcode03" : "AECOM-105",
"data/final/reinfection2b/output/sample_barcode04" : "AECOM-106",
"data/final/reinfection2b/output/sample_barcode05" : "AECOM-107",
"data/final/reinfection2b/output/sample_barcode06" : "AECOM-108",
"data/final/reinfection2b/output/sample_barcode07" : "AECOM-109",
"data/final/reinfection2b/output/sample_barcode08" : "AECOM-110",
"data/final/reinfection2b/output/sample_barcode09" : "AECOM-111",
"data/final/reinfection2b/output/sample_barcode10" : "AECOM-112",
"data/final/reinfection2b/output/sample_barcode11" : "AECOM-113",
"data/final/reinfection2b/output/sample_barcode12" : "AECOM-114",
"data/final/reinfection2b/output/sample_barcode13" : "AECOM-115",
"data/final/reinfection2b/output/sample_barcode14" : "AECOM-116",
"data/final/reinfection2b/output/sample_barcode15" : "AECOM-117",
"data/final/reinfection2b/output/sample_barcode16" : "AECOM-118",
"data/final/reinfection2b/output/sample_barcode17" : "AECOM-119",
"data/final/reinfection2b/output/sample_barcode18" : "AECOM-120",
"data/final/reinfection2b/output/sample_barcode19" : "AECOM-121",
"data/final/reinfection2b/output/sample_barcode20" : "AECOM-122",
"data/final/reinfection2b/output/sample_barcode21" : "AECOM-126b",
}
vardf_all = []
all_samples = reinfection_samples
num_samples = len(all_samples)
base = "data/final/reinfection/output/"
# sam_files = {**sam_files, **{x: base + x + '.primertrimmed.rg.sorted.bam' for x in reinfection_samples}}
# fasta_files = {**fasta_files, **{x: base + x + '.consensus.fasta' for x in reinfection_samples}}
# variant_files = {**variant_files, **{x: base + x + '.merged.vcf' for x in reinfection_samples}}
for filename, sample_id in tqdm(all_samples.items()):
# Get reads to assess depth
sam_filename = filename + '.primertrimmed.rg.sorted.bam'
fasta_filename = filename + '.consensus.fasta'
variant_filename = filename + '.merged.vcf'
alignments = AlignmentFile(sam_filename).fetch()
consensus = list(skbio.read(fasta_filename, format="fasta"))[0]
coverage = np.zeros(29903)
for alignment in alignments:
coverage[alignment.positions] += 1
# Get variants
variants = VariantFile(variant_filename)
vardf_all.extend([
{
**{
key: value
for key, value
in var.info.items()
},
**{
"sample_id": sample_id,
"position": var.pos,
"quality": var.qual,
"reference": var.ref,
"alternates": var.alts,
"depth": coverage[var.pos],
},
}
for var in variants.fetch()
])
vardf_all = pd.DataFrame(vardf_all).set_index("sample_id")
vardf_all['score'] = vardf_all['quality'] / vardf_all['depth']
vardf = vardf_all[vardf_all['score']>3] # Filter out variants that are not high quality
x = [i for i in range(30000)]
y = [0 for _ in x]
y_ = [sum(vardf['position']==i)/num_samples for i in x]
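# y_ holds, for each genomic position, the fraction of samples (rows in vardf)
# with a high-quality variant call at that position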
coocurrence = | pd.crosstab(index=vardf.index, columns=vardf['position']) | pandas.crosstab |
import sys
import warnings
from bisect import bisect_left, bisect_right
from collections import Counter
from pathlib import Path
from textwrap import dedent
import allel
import dask
import dask.array as da
import ipinfo
import numba
import numpy as np
import pandas as pd
import xarray as xr
import zarr
from tqdm.auto import tqdm
from tqdm.dask import TqdmCallback
try:
# noinspection PyPackageRequirements
from google import colab
except ImportError:
colab = None
import malariagen_data
from . import veff
from .util import (
DIM_ALLELE,
DIM_PLOIDY,
DIM_SAMPLE,
DIM_VARIANT,
CacheMiss,
LoggingHelper,
Region,
da_compress,
da_from_zarr,
dask_compress_dataset,
hash_params,
init_filesystem,
init_zarr_store,
jitter,
locate_region,
read_gff3,
resolve_region,
type_error,
unpack_gff3_attributes,
xarray_concat,
)
# silence dask performance warnings
dask.config.set(**{"array.slicing.split_large_chunks": False})
PUBLIC_RELEASES = ("3.0",)
GCS_URL = "gs://vo_agam_release/"
GENESET_GFF3_PATH = (
"reference/genome/agamp4/Anopheles-gambiae-PEST_BASEFEATURES_AgamP4.12.gff3.gz"
)
GENOME_FASTA_PATH = (
"reference/genome/agamp4/Anopheles-gambiae-PEST_CHROMOSOMES_AgamP4.fa"
)
GENOME_FAI_PATH = (
"reference/genome/agamp4/Anopheles-gambiae-PEST_CHROMOSOMES_AgamP4.fa.fai"
)
GENOME_ZARR_PATH = (
"reference/genome/agamp4/Anopheles-gambiae-PEST_CHROMOSOMES_AgamP4.zarr"
)
# DEFAULT_SPECIES_ANALYSIS = "aim_20200422"
DEFAULT_SPECIES_ANALYSIS = "aim_20220528"
DEFAULT_SITE_FILTERS_ANALYSIS = "dt_20200416"
DEFAULT_COHORTS_ANALYSIS = "20211101"
CONTIGS = "2R", "2L", "3R", "3L", "X"
DEFAULT_GENOME_PLOT_WIDTH = 800 # width in px for bokeh genome plots
DEFAULT_GENES_TRACK_HEIGHT = 120 # height in px for bokeh genes track plots
DEFAULT_MAX_COVERAGE_VARIANCE = 0.2
AA_CHANGE_QUERY = (
"effect in ['NON_SYNONYMOUS_CODING', 'START_LOST', 'STOP_LOST', 'STOP_GAINED']"
)
# Note regarding release identifiers and storage paths. Within the
# data storage, we have used path segments like "v3", "v3.1", "v3.2",
# etc., to separate data from different releases. There is an inconsistency
# in this convention, because the "v3" should have been "v3.0". To
# make the API more consistent, we would like to use consistent release
# identifiers like "3.0", "3.1", "3.2", etc., as parameter values and
# when release identifiers are added to returned dataframes. In order to
# achieve this, below we define two functions that allow mapping between
# these consistent release identifiers, and the less consistent release
# storage path segments.
def _release_to_path(release):
"""Compatibility function, allows us to use release identifiers like "3.0"
and "3.1" in the public API, and map these internally into storage path
segments."""
if release == "3.0":
# special case
return "v3"
elif release.startswith("3."):
return f"v{release}"
else:
raise ValueError(f"Invalid release: {release!r}")
def _path_to_release(path):
"""Compatibility function, allows us to use release identifiers like "3.0"
and "3.1" in the public API, and map these internally into storage path
segments."""
if path == "v3":
return "3.0"
elif path.startswith("v3."):
return path[1:]
else:
raise RuntimeError(f"Unexpected release path: {path!r}")
class Ag3:
"""Provides access to data from Ag3.x releases.
Parameters
----------
url : str
Base path to data. Give "gs://vo_agam_release/" to use Google Cloud
Storage, or a local path on your file system if data have been
downloaded.
cohorts_analysis : str
Cohort analysis version.
    species_analysis : {"aim_20220528", "aim_20200422", "pca_20200422"}, optional
Species analysis version.
site_filters_analysis : str, optional
Site filters analysis version.
bokeh_output_notebook : bool, optional
If True (default), configure bokeh to output plots to the notebook.
results_cache : str, optional
Path to directory on local file system to save results.
log : str or stream, optional
File path or stream output for logging messages.
debug : bool, optional
Set to True to enable debug level logging.
show_progress : bool, optional
If True, show a progress bar during longer-running computations.
check_location : bool, optional
If True, use ipinfo to check the location of the client system.
**kwargs
Passed through to fsspec when setting up file system access.
Examples
--------
Access data from Google Cloud Storage (default):
>>> import malariagen_data
>>> ag3 = malariagen_data.Ag3()
Access data downloaded to a local file system:
>>> ag3 = malariagen_data.Ag3("/local/path/to/vo_agam_release/")
Access data from Google Cloud Storage, with caching on the local file system
in a directory named "gcs_cache":
>>> ag3 = malariagen_data.Ag3(
... "simplecache::gs://vo_agam_release",
... simplecache=dict(cache_storage="gcs_cache"),
... )
Set up caching of some longer-running computations on the local file system,
in a directory named "results_cache":
>>> ag3 = malariagen_data.Ag3(results_cache="results_cache")
"""
contigs = CONTIGS
def __init__(
self,
url=GCS_URL,
cohorts_analysis=DEFAULT_COHORTS_ANALYSIS,
species_analysis=DEFAULT_SPECIES_ANALYSIS,
site_filters_analysis=DEFAULT_SITE_FILTERS_ANALYSIS,
bokeh_output_notebook=True,
results_cache=None,
log=sys.stdout,
debug=False,
show_progress=True,
check_location=True,
**kwargs,
):
self._url = url
self._pre = kwargs.pop("pre", False)
self._cohorts_analysis = cohorts_analysis
self._species_analysis = species_analysis
self._site_filters_analysis = site_filters_analysis
self._debug = debug
self._show_progress = show_progress
# set up logging
self._log = LoggingHelper(name=__name__, out=log, debug=debug)
# set up filesystem
self._fs, self._base_path = init_filesystem(url, **kwargs)
# set up caches
self._cache_releases = None
self._cache_sample_sets = dict()
self._cache_sample_set_to_release = None
self._cache_general_metadata = dict()
self._cache_species_calls = dict()
self._cache_site_filters = dict()
self._cache_snp_sites = None
self._cache_snp_genotypes = dict()
self._cache_genome = None
self._cache_annotator = None
self._cache_geneset = dict()
self._cache_cross_metadata = None
self._cache_site_annotations = None
self._cache_cnv_hmm = dict()
self._cache_cnv_coverage_calls = dict()
self._cache_cnv_discordant_read_calls = dict()
self._cache_haplotypes = dict()
self._cache_haplotype_sites = dict()
self._cache_cohort_metadata = dict()
self._cache_sample_metadata = dict()
self._cache_aim_variants = dict()
if results_cache is not None:
results_cache = Path(results_cache).expanduser().resolve()
results_cache.mkdir(parents=True, exist_ok=True)
self._results_cache = results_cache
# get bokeh to output plots to the notebook - this is a common gotcha,
# users forget to do this and wonder why bokeh plots don't show
if bokeh_output_notebook:
import bokeh.io as bkio
bkio.output_notebook(hide_banner=True)
# Occasionally, colab will allocate a VM outside the US, e.g., in
# Europe or Asia. Because the MalariaGEN data GCS bucket is located
# in the US, this is usually bad for performance, because of
# increased latency and lower bandwidth. Add a check for this and
# issue a warning if not in the US.
client_details = None
if check_location:
try:
client_details = ipinfo.getHandler().getDetails()
if GCS_URL in url and colab and client_details.country != "US":
warnings.warn(
dedent(
"""
Your currently allocated Google Colab VM is not located in the US.
This usually means that data access will be substantially slower.
If possible, select "Runtime > Factory reset runtime" from the menu
to request a new VM and try again.
"""
)
)
except OSError:
pass
self._client_details = client_details
@property
def _client_location(self):
details = self._client_details
if details is not None:
region = details.region
country = details.country
location = f"{region}, {country}"
if colab:
location += " (colab)"
elif hasattr(details, "hostname"):
hostname = details.hostname
if hostname.endswith("googleusercontent.com"):
location += " (Google Cloud)"
else:
location = "unknown"
return location
def __repr__(self):
text = (
f"<MalariaGEN Ag3 API client>\n"
f"Storage URL : {self._url}\n"
f"Data releases available : {', '.join(self.releases)}\n"
f"Results cache : {self._results_cache}\n"
f"Cohorts analysis : {self._cohorts_analysis}\n"
f"Species analysis : {self._species_analysis}\n"
f"Site filters analysis : {self._site_filters_analysis}\n"
f"Software version : malariagen_data {malariagen_data.__version__}\n"
f"Client location : {self._client_location}\n"
f"---\n"
f"Please note that data are subject to terms of use,\n"
f"for more information see https://www.malariagen.net/data\n"
f"or contact <EMAIL>. For API documentation see \n"
f"https://malariagen.github.io/vector-data/ag3/api.html"
)
return text
def _repr_html_(self):
html = f"""
<table class="malariagen-ag3">
<thead>
<tr>
<th style="text-align: left" colspan="2">MalariaGEN Ag3 API client</th>
</tr>
<tr><td colspan="2" style="text-align: left">
Please note that data are subject to terms of use,
for more information see <a href="https://www.malariagen.net/data">
the MalariaGEN website</a> or contact <EMAIL>.
See also the <a href="https://malariagen.github.io/vector-data/ag3/api.html">Ag3 API docs</a>.
</td></tr>
</thead>
<tbody>
<tr>
<th style="text-align: left">
Storage URL
</th>
<td>{self._url}</td>
</tr>
<tr>
<th style="text-align: left">
Data releases available
</th>
<td>{', '.join(self.releases)}</td>
</tr>
<tr>
<th style="text-align: left">
Results cache
</th>
<td>{self._results_cache}</td>
</tr>
<tr>
<th style="text-align: left">
Cohorts analysis
</th>
<td>{self._cohorts_analysis}</td>
</tr>
<tr>
<th style="text-align: left">
Species analysis
</th>
<td>{self._species_analysis}</td>
</tr>
<tr>
<th style="text-align: left">
Site filters analysis
</th>
<td>{self._site_filters_analysis}</td>
</tr>
<tr>
<th style="text-align: left">
Software version
</th>
<td>malariagen_data {malariagen_data.__version__}</td>
</tr>
<tr>
<th style="text-align: left">
Client location
</th>
<td>{self._client_location}</td>
</tr>
</tbody>
</table>
"""
return html
def _progress(self, iterable, **kwargs):
# progress doesn't mix well with debug logging
disable = self._debug or not self._show_progress
return tqdm(iterable, disable=disable, **kwargs)
def _dask_progress(self, **kwargs):
disable = not self._show_progress
return TqdmCallback(disable=disable, **kwargs)
@property
def releases(self):
"""The releases for which data are available at the given storage
location."""
if self._cache_releases is None:
if self._pre:
# Here we discover which releases are available, by listing the storage
# directory and examining the subdirectories. This may include "pre-releases"
# where data may be incomplete.
sub_dirs = [p.split("/")[-1] for p in self._fs.ls(self._base_path)]
releases = tuple(
sorted(
[
_path_to_release(d)
for d in sub_dirs
if d.startswith("v3")
and self._fs.exists(f"{self._base_path}/{d}/manifest.tsv")
]
)
)
if len(releases) == 0:
raise ValueError("No releases found.")
self._cache_releases = releases
else:
self._cache_releases = PUBLIC_RELEASES
return self._cache_releases
def _read_sample_sets(self, *, release):
"""Read the manifest of sample sets for a given release."""
release_path = _release_to_path(release)
path = f"{self._base_path}/{release_path}/manifest.tsv"
with self._fs.open(path) as f:
df = pd.read_csv(f, sep="\t", na_values="")
df["release"] = release
return df
def sample_sets(self, release=None):
"""Access a dataframe of sample sets.
Parameters
----------
release : str, optional
Release identifier. Give "3.0" to access the Ag1000G phase 3 data
release.
Returns
-------
df : pandas.DataFrame
A dataframe of sample sets, one row per sample set.
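        Examples
        --------
        A minimal usage sketch (illustrative only, mirroring the class-level
        examples above):
        >>> ag3 = malariagen_data.Ag3()
        >>> df_sets = ag3.sample_sets(release="3.0")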
"""
if release is None:
# retrieve sample sets from all available releases
release = self.releases
if isinstance(release, str):
# retrieve sample sets for a single release
if release not in self.releases:
raise ValueError(f"Release not available: {release!r}")
try:
df = self._cache_sample_sets[release]
except KeyError:
df = self._read_sample_sets(release=release)
self._cache_sample_sets[release] = df
elif isinstance(release, (list, tuple)):
# check no duplicates
counter = Counter(release)
for k, v in counter.items():
if v > 1:
raise ValueError(f"Duplicate values: {k!r}.")
# retrieve sample sets from multiple releases
df = pd.concat(
[self.sample_sets(release=r) for r in release],
axis=0,
ignore_index=True,
)
else:
raise TypeError
return df.copy()
@property
def v3_wild(self):
"""Legacy, convenience property to access sample sets from the
3.0 release, excluding the lab crosses."""
return [
x
for x in self.sample_sets(release="3.0")["sample_set"].tolist()
if x != "AG1000G-X"
]
def _lookup_release(self, *, sample_set):
"""Find which release a sample set was included in."""
if self._cache_sample_set_to_release is None:
df_sample_sets = self.sample_sets().set_index("sample_set")
self._cache_sample_set_to_release = df_sample_sets["release"].to_dict()
try:
return self._cache_sample_set_to_release[sample_set]
except KeyError:
raise ValueError(f"No release found for sample set {sample_set!r}")
def _read_general_metadata(self, *, sample_set):
"""Read metadata for a single sample set."""
try:
df = self._cache_general_metadata[sample_set]
except KeyError:
release = self._lookup_release(sample_set=sample_set)
release_path = _release_to_path(release)
path = f"{self._base_path}/{release_path}/metadata/general/{sample_set}/samples.meta.csv"
with self._fs.open(path) as f:
df = pd.read_csv(f, na_values="")
# ensure all column names are lower case
df.columns = [c.lower() for c in df.columns]
# add a couple of columns for convenience
df["sample_set"] = sample_set
df["release"] = release
self._cache_general_metadata[sample_set] = df
return df.copy()
def _read_species_calls(self, *, sample_set):
"""Read species calls for a single sample set."""
key = sample_set
try:
df = self._cache_species_calls[key]
except KeyError:
release = self._lookup_release(sample_set=sample_set)
release_path = _release_to_path(release)
path_prefix = f"{self._base_path}/{release_path}/metadata"
if self._species_analysis == "aim_20220528":
path = f"{path_prefix}/species_calls_aim_20220528/{sample_set}/samples.species_aim.csv"
dtype = {
"aim_species_gambcolu_arabiensis": object,
"aim_species_gambiae_coluzzii": object,
"aim_species": object,
}
elif self._species_analysis == "aim_20200422":
# TODO this is legacy, deprecate at some point
path = f"{path_prefix}/species_calls_20200422/{sample_set}/samples.species_aim.csv"
dtype = {
"species_gambcolu_arabiensis": object,
"species_gambiae_coluzzii": object,
}
elif self._species_analysis == "pca_20200422":
# TODO this is legacy, deprecate at some point
path = f"{path_prefix}/species_calls_20200422/{sample_set}/samples.species_pca.csv"
dtype = {
"species_gambcolu_arabiensis": object,
"species_gambiae_coluzzii": object,
}
else:
raise ValueError(
f"Unknown species calling analysis: {self._species_analysis!r}"
)
with self._fs.open(path) as f:
df = pd.read_csv(
f,
na_values=["", "NA"],
# ensure correct dtype even where all values are missing
dtype=dtype,
)
# add a single species call column, for convenience
def consolidate_species(s):
species_gambcolu_arabiensis = s["species_gambcolu_arabiensis"]
species_gambiae_coluzzii = s["species_gambiae_coluzzii"]
if species_gambcolu_arabiensis == "arabiensis":
return "arabiensis"
elif species_gambcolu_arabiensis == "intermediate":
return "intermediate_arabiensis_gambiae"
elif species_gambcolu_arabiensis == "gamb_colu":
# look at gambiae_vs_coluzzii
if species_gambiae_coluzzii == "gambiae":
return "gambiae"
elif species_gambiae_coluzzii == "coluzzii":
return "coluzzii"
elif species_gambiae_coluzzii == "intermediate":
return "intermediate_gambiae_coluzzii"
else:
# some individuals, e.g., crosses, have a missing species call
return np.nan
if self._species_analysis == "aim_20200422":
# TODO this is legacy, deprecate at some point
df["species"] = df.apply(consolidate_species, axis=1)
# normalise column prefixes
df = df.rename(
columns={
"aim_fraction_arab": "aim_species_fraction_arab",
"aim_fraction_colu": "aim_species_fraction_colu",
"species_gambcolu_arabiensis": "aim_species_gambcolu_arabiensis",
"species_gambiae_coluzzii": "aim_species_gambiae_coluzzii",
"species": "aim_species",
}
)
elif self._species_analysis == "pca_20200422":
# TODO this is legacy, deprecate at some point
df["species"] = df.apply(consolidate_species, axis=1)
# normalise column prefixes
df = df.rename(
columns={
"PC1": "pca_species_PC1",
"PC2": "pca_species_PC2",
"species_gambcolu_arabiensis": "pca_species_gambcolu_arabiensis",
"species_gambiae_coluzzii": "pca_species_gambiae_coluzzii",
"species": "pca_species",
}
)
# ensure all column names are lower case
df.columns = [c.lower() for c in df.columns]
self._cache_species_calls[key] = df
return df.copy()
def _prep_sample_sets_arg(self, *, sample_sets):
"""Common handling for the `sample_sets` parameter. For convenience, we
allow this to be a single sample set, or a list of sample sets, or a
release identifier, or a list of release identifiers."""
if sample_sets is None:
# all available sample sets
sample_sets = self.sample_sets()["sample_set"].tolist()
elif isinstance(sample_sets, str):
if sample_sets.startswith("3."):
# convenience, can use a release identifier to denote all sample sets in a release
sample_sets = self.sample_sets(release=sample_sets)[
"sample_set"
].tolist()
else:
# single sample set, normalise to always return a list
sample_sets = [sample_sets]
elif isinstance(sample_sets, (list, tuple)):
# list or tuple of sample sets or releases
prepped_sample_sets = []
for s in sample_sets:
# make a recursive call to handle the case where s is a release identifier
sp = self._prep_sample_sets_arg(sample_sets=s)
# make sure we end up with a flat list of sample sets
if isinstance(sp, str):
prepped_sample_sets.append(sp)
else:
prepped_sample_sets.extend(sp)
sample_sets = prepped_sample_sets
else:
raise TypeError(
f"Invalid type for sample_sets parameter; expected str, list or tuple; found: {sample_sets!r}"
)
# check all sample sets selected at most once
counter = Counter(sample_sets)
for k, v in counter.items():
if v > 1:
raise ValueError(
f"Bad value for sample_sets parameter, {k:!r} selected more than once."
)
return sample_sets
def species_calls(self, sample_sets=None):
"""Access species calls for one or more sample sets.
Parameters
----------
sample_sets : str or list of str, optional
Can be a sample set identifier (e.g., "AG1000G-AO") or a list of
            sample set identifiers (e.g., ["AG1000G-BF-A", "AG1000G-BF-B"]) or a
release identifier (e.g., "3.0") or a list of release identifiers.
Returns
-------
df : pandas.DataFrame
A dataframe of species calls for one or more sample sets, one row
per sample.
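        Examples
        --------
        A minimal usage sketch (illustrative only; the sample set name is taken
        from the parameter description above):
        >>> ag3 = malariagen_data.Ag3()
        >>> df_species = ag3.species_calls(sample_sets="AG1000G-BF-A")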
"""
sample_sets = self._prep_sample_sets_arg(sample_sets=sample_sets)
# concatenate multiple sample sets
dfs = [self._read_species_calls(sample_set=s) for s in sample_sets]
df = pd.concat(dfs, axis=0, ignore_index=True)
return df
def _sample_metadata(self, *, sample_set):
df = self._read_general_metadata(sample_set=sample_set)
df_species = self._read_species_calls(sample_set=sample_set)
df = df.merge(df_species, on="sample_id", sort=False)
df_cohorts = self._read_cohort_metadata(sample_set=sample_set)
df = df.merge(df_cohorts, on="sample_id", sort=False)
return df
def sample_metadata(
self,
sample_sets=None,
sample_query=None,
):
"""Access sample metadata for one or more sample sets.
Parameters
----------
sample_sets : str or list of str, optional
Can be a sample set identifier (e.g., "AG1000G-AO") or a list of
sample set identifiers (e.g., ["AG1000G-BF-A", "AG1000G-BF-B"]) or a
release identifier (e.g., "3.0") or a list of release identifiers.
sample_query : str, optional
A pandas query string which will be evaluated against the sample
metadata e.g., "taxon == 'coluzzii' and country == 'Burkina Faso'".
Returns
-------
df_samples : pandas.DataFrame
A dataframe of sample metadata, one row per sample.
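        Examples
        --------
        A minimal usage sketch (illustrative only; the query string is taken
        from the parameter description above):
        >>> ag3 = malariagen_data.Ag3()
        >>> df_samples = ag3.sample_metadata(
        ...     sample_sets="3.0",
        ...     sample_query="taxon == 'coluzzii' and country == 'Burkina Faso'",
        ... )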
"""
sample_sets = self._prep_sample_sets_arg(sample_sets=sample_sets)
cache_key = tuple(sample_sets)
try:
df_samples = self._cache_sample_metadata[cache_key]
except KeyError:
# concatenate multiple sample sets
dfs = []
# there can be some delay here due to network latency, so show progress
sample_sets_iterator = self._progress(
sample_sets, desc="Load sample metadata"
)
for s in sample_sets_iterator:
df = self._sample_metadata(sample_set=s)
dfs.append(df)
df_samples = | pd.concat(dfs, axis=0, ignore_index=True) | pandas.concat |
# Copyright © 2019 <NAME>
"""
Test for the ``preprocess._aggregate_columns._difference`` module.
"""
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
import unittest
# Tests for:
from ...clean_variables import VariableCleaner
class PreprocessConstantDifferenceTests(unittest.TestCase):
"""
Tests for the ``preprocess._aggregate_columns._difference`` module. Assert final data frames match expectations.
"""
@staticmethod
def test_clean_difference_ints_0():
"""Test subtracting 0 from a column."""
_input = DataFrame({"A": [1, 2, 3]})
_expected = DataFrame({"A": [1, 2, 3]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": 0}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_ints_1():
"""Test subtracting 1 from a column."""
_input = DataFrame({"A": [1, 2, 3]})
_expected = DataFrame({"A": [0, 1, 2]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": 1}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_floats_0():
"""Test subtracting 0.0 from a column."""
_input = DataFrame({"A": [1.0, 2.0, 3.0]})
_expected = DataFrame({"A": [1.0, 2.0, 3.0]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": 0.0}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_floats_negative_1():
"""Test subtracting -1.0 from a column."""
_input = DataFrame({"A": [1.0, 2.0, 3.0]})
_expected = DataFrame({"A": [2.0, 3.0, 4.0]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": -1.0}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
class PreprocessVariableDifferenceTests(unittest.TestCase):
"""
Tests for the ``preprocess._aggregate_columns._difference`` module with column subtraction.
"""
@staticmethod
def test_clean_difference_int_column():
"""Test subtracting the right column from the left."""
_input = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]})
_expected = DataFrame({"A": [-1, -1, -1], "B": [2, 3, 4]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": "B"}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_right_string_column():
"""Test subtracting the right column from the left. Right column has strings."""
_input = DataFrame({"A": [1, 2, 3], "B": ["2", "3", "4"]})
_expected = DataFrame({"A": [-1.0, -1.0, -1.0], "B": ["2", "3", "4"]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": "B"}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_left_string_column():
"""Test subtracting the right column from the left. Left column has strings."""
_input = DataFrame({"A": ["1", "2", "3"], "B": [2, 3, 4]})
_expected = DataFrame({"A": [-1.0, -1.0, -1.0], "B": [2, 3, 4]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": "B"}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_both_string_column():
"""Test subtracting the right column from the left. Both left and right have strings."""
_input = DataFrame({"A": ["1", "2", "3"], "B": ["2", "3", "4"]})
_expected = | DataFrame({"A": [-1.0, -1.0, -1.0], "B": ["2", "3", "4"]}) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 8 20:59:11 2017
@author: changyaochen
"""
import pandas as pd
import os
import bokeh.plotting as bkp
import bokeh.models as bkm
from bokeh.resources import CDN
from bokeh.embed import file_html
from bokeh.models import (
GMapPlot, GMapOptions, ColumnDataSource, Circle, PanTool,
WheelZoomTool, BoxSelectTool, Range1d
)
from .config import google_map_api_key
def plot_income_pop(output='income'):
"""
    For informative purposes: show either median income or population
    by census block on a Google Map, returned as embeddable HTML.
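    Example (illustrative only):
        html = plot_income_pop(output='pop')  # embeddable HTML string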
"""
dname = os.path.dirname(os.path.abspath(__file__))
project_folder = '/'.join(dname.split('/')[:-1]) + '/NYC_bikeshare'
df = pd.read_csv(project_folder + '/data/NYC_income_population_lite.csv')
# ======== preparing the plot =======
map_options = GMapOptions(
lat=40.75, lng=-73.95, map_type="roadmap", zoom=12)
plot = GMapPlot(
x_range=Range1d(),
y_range=Range1d(),
map_options=map_options,
api_key=google_map_api_key
)
plot.title.text = 'Income and population by station'
# plot.api_key = google_map_api_key
source1 = bkp.ColumnDataSource(
data=dict(lat=df['centroid_lat'],
long=df['centroid_long'],
income_plot=df['median_income'] / 10000,
income=df['median_income'],
pop_plot=df['Population'] / 500,
pop=df['Population']))
if output == 'income':
circle1 = Circle(x='long', y='lat', fill_color='red',
fill_alpha=0.7, line_alpha=0, size='income_plot')
plot.add_glyph(source1, circle1)
plot.add_tools(PanTool(), WheelZoomTool(), BoxSelectTool())
hover = bkm.HoverTool(tooltips=[('income', '@income{$0,0}')])
        plot.title.text = 'Median income. Data source: US Census'
plot.add_tools(hover)
bokeh_map = file_html(plot, CDN, "bokeh")
print('return the plot!')
# return plot
return bokeh_map
elif output == 'pop':
circle1 = Circle(x='long', y='lat', fill_color='blue',
fill_alpha=0.7, line_alpha=0, size='pop_plot')
plot.add_glyph(source1, circle1)
plot.add_tools(PanTool(), WheelZoomTool(), BoxSelectTool())
hover = bkm.HoverTool(tooltips=[('population', '@pop')])
plot.title.text = 'Population. Data source: US Census'
plot.add_tools(hover)
bokeh_html = file_html(plot, CDN, "bokeh")
# return plot
return bokeh_html
else:
        raise ValueError("output must be 'income' or 'pop'")
return
def plot_destination(start_station, end_neighborhoods, N=3):
"""
    Plot the top N most probable end neighborhoods for a given start station.
"""
dname = os.path.dirname(os.path.abspath(__file__))
project_folder = '/'.join(dname.split('/')[:-1]) + '/NYC_bikeshare'
df = pd.read_csv(project_folder + '/data/NYC_neighborhoods.csv')
df_for_plot = pd.DataFrame(end_neighborhoods)
df_for_plot = pd.merge(df_for_plot, df,
left_on='name', right_on='neighborhood',
how='left')
# lat_centroid = df_for_plot['latitude'].mean()
# long_centroid = df_for_plot['longitude'].mean()
df_station = | pd.read_csv(project_folder + '/data/NYC_bike_stations_v1.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: a 2-D matrix of shape (2, 3) used as input; `empty` is a factory
        # that constructs appropriately sized objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
        # an all-masked frame is all-NaN, so it compares elementwise unequal to itself
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence-like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
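        # Hedged aside, not part of the original test: the constructor path
        # appears to depend on the first list element.  With the ndarray
        # first, all rows are treated as plain positional arrays, so the
        # Series name 'x' and its index are dropped and the transposed frame
        # falls back to integer column labels, e.g.
        #   DataFrame([np.zeros(3), Series(np.zeros(3), name='x')]).T.columns
        #   # -> [0, 1] rather than ['x', ...]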
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 4)])
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
orient='index')
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)],
columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
pytest.raises(ValueError, DataFrame.from_dict,
OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
tm.assert_frame_equal(df, expected)
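        # Hedged aside, not part of the original test: pandas has no
        # fixed-width string dtype of its own here, so every string-like
        # dtype request (str, np.str_, np.unicode_, 'U5') is stored as
        # object, which is why a single object-dtype frame serves as the
        # expected value for all four cases, e.g.
        #   DataFrame(index=[0], columns=[0], dtype=str).dtypes[0]
        #   # -> dtype('O')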
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df,
DataFrame(np.zeros(df.shape).astype('float64'),
df.index, df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'),
df.index, df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
tm.assert_frame_equal(df, DataFrame(np.array([['a', 'a'], ['a', 'a']],
dtype=object),
index=[1, 2], columns=['a', 'c']))
pytest.raises(ValueError, DataFrame, 'a', [1, 2])
pytest.raises(ValueError, DataFrame, 'a', columns=['a', 'c'])
msg = 'incompatible data and dtype'
with pytest.raises(TypeError, match=msg):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime(2001, 1, 2, 0, 0)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname: 2})
        result = result.sort_index()
        expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array(1., dtype=floatname),
intname: np.array(1, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = {objectname: 1}
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result = result.sort_index()
expected = Series(expected).sort_index()
tm.assert_series_equal(result, expected)
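        # Illustrative sketch, not part of the original test: a 0-dim ndarray
        # such as np.array(1., dtype='float64') carries just a dtype and a
        # scalar value, so the constructor broadcasts it along the supplied
        # index exactly like the Python scalars above, e.g.
        #   DataFrame({'x': np.array(1.5)}, index=np.arange(3))
        #   # -> three rows of 1.5 in a float64 column 'x'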
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array([1.] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
assert datetime_s.dtype == 'M8[ns]'
df = DataFrame({'datetime_s': datetime_s})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates': dates})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
df = DataFrame([{'End Date': dt}])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
df = DataFrame({'value': dr})
assert df.iat[0, 0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'US/Eastern'
# GH 7822
        # preserve an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz='US/Eastern')
expected = DataFrame(
{'a': i.to_series(keep_tz=True).reset_index(drop=True)})
df = DataFrame()
df['a'] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': i})
tm.assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame({'a': i, 'b': i_no_tz})
expected = DataFrame({'a': i.to_series(keep_tz=True)
.reset_index(drop=True), 'b': i_no_tz})
tm.assert_frame_equal(df, expected)
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = DataFrame(arr).get_dtype_counts()
expected = Series({'datetime64[ns]': 1})
tm.assert_series_equal(result, expected)
def test_constructor_for_list_with_dtypes(self):
# TODO(wesm): unused
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64': 5})
df = DataFrame([np.array(np.arange(5), dtype='int32')
for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32': 5})
        # overflow issue? (we always expect int64 upcasting here)
df = DataFrame({'a': [2 ** 31, 2 ** 31 + 1]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame([1., 2.])
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1.}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
# with object list
df = DataFrame({'a': [1, 2, 4, 7], 'b': [1.2, 2.3, 5.1, 6.3],
'c': list('abcd'),
'd': [datetime(2000, 1, 1) for i in range(4)],
'e': [1., 2, 4., 7]})
result = df.get_dtype_counts()
expected = Series(
{'int64': 1, 'float64': 2, datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self):
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
assert (cop['A'] == 5).all()
assert not (self.frame['A'] == 5).all()
def test_constructor_ndarray_copy(self):
df = DataFrame(self.frame.values)
self.frame.values[5] = 5
assert (df.values[5] == 5).all()
df = DataFrame(self.frame.values, copy=True)
self.frame.values[6] = 6
assert not (df.values[6] == 6).all()
def test_constructor_series_copy(self):
series = self.frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
assert not (series['A'] == 5).all()
def test_constructor_with_nas(self):
# GH 5016
# na's in indices
def check(df):
for i in range(len(df.columns)):
df.iloc[:, i]
indexer = np.arange(len(df.columns))[isna(df.columns)]
# No NaN found -> error
if len(indexer) == 0:
def f():
df.loc[:, np.nan]
pytest.raises(TypeError, f)
# single nan should result in Series
elif len(indexer) == 1:
tm.assert_series_equal(df.iloc[:, indexer[0]],
df.loc[:, np.nan])
# multiple nans should result in DataFrame
else:
tm.assert_frame_equal(df.iloc[:, indexer],
df.loc[:, np.nan])
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
check(df)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
# GH 21428 (non-unique columns)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1, 2, 2])
check(df)
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({'a': [np.nan, False]})
assert d['a'].dtype == np.object_
assert not d['a'][1]
def test_constructor_categorical(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([Categorical(list('abc')), Categorical(list('abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
pytest.raises(ValueError,
lambda: DataFrame([Categorical(list('abc')),
Categorical(list('abdefg'))]))
# ndim > 1
pytest.raises(NotImplementedError,
lambda: Categorical(np.array([list('abcd')])))
def test_constructor_categorical_series(self):
items = [1, 2, 3, 1]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
items = ["a", "b", "c", "a"]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=('i4,f4,a10'))
arr[:] = [(1, 2., 'Hello'), (2, 3., "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = pd.Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
tm.assert_index_equal(indexed_frame.index, index)
# without names, it should go to last ditch
arr2 = np.zeros((2, 3))
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
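        # Hedged note, not part of the original test: "last ditch" means the
        # plain 2-D ndarray has no structured-dtype field names, so
        # from_records cannot derive column labels and simply defers to the
        # regular DataFrame constructor, e.g.
        #   list(DataFrame.from_records(np.zeros((2, 3))).columns)
        #   # -> [0, 1, 2]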
# wrong length
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(arr, index=index[:-1])
indexed_frame = DataFrame.from_records(arr, index='f1')
        # round-trip the indexed frame back through to_records
records = indexed_frame.to_records()
assert len(records.dtype.names) == 3
records = indexed_frame.to_records(index=False)
assert len(records.dtype.names) == 2
assert 'index' not in records.dtype.names
def test_from_records_nones(self):
tuples = [(1, 2, None, 3),
(1, 2, None, 3),
(None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])
assert np.isnan(df['c'][0])
def test_from_records_iterator(self):
arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6),
(7., 7., 8, 8)],
dtype=[('x', np.float64), ('u', np.float32),
('y', np.int64), ('z', np.int32)])
df = DataFrame.from_records(iter(arr), nrows=2)
xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),
'u': np.array([1.0, 3.0], dtype=np.float32),
'y': np.array([2, 4], dtype=np.int64),
'z': np.array([2, 4], dtype=np.int32)})
tm.assert_frame_equal(df.reindex_like(xp), xp)
# no dtypes specified here, so just compare with the default
arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]
df = DataFrame.from_records(iter(arr), columns=['x', 'y'],
nrows=2)
tm.assert_frame_equal(df, xp.reindex(columns=['x', 'y']),
check_dtype=False)
def test_from_records_tuples_generator(self):
def tuple_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield (i, letters[i % len(letters)], i / length)
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in tuple_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = tuple_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_lists_generator(self):
def list_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield [i, letters[i % len(letters)], i / length]
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in list_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = list_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_columns_not_modified(self):
tuples = [(1, 2, 3),
(1, 2, 3),
(2, 5, 3)]
columns = ['a', 'b', 'c']
original_columns = list(columns)
df = DataFrame.from_records(tuples, columns=columns, index='a') # noqa
assert columns == original_columns
def test_from_records_decimal(self):
from decimal import Decimal
tuples = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]
df = DataFrame.from_records(tuples, columns=['a'])
assert df['a'].dtype == object
df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)
assert df['a'].dtype == np.float64
assert np.isnan(df['a'].values[-1])
def test_from_records_duplicates(self):
result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
expected = DataFrame([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
tm.assert_frame_equal(result, expected)
def test_from_records_set_index_name(self):
def create_dict(order_id):
return {'order_id': order_id, 'quantity': np.random.randint(1, 10),
'price': np.random.randint(1, 10)}
documents = [create_dict(i) for i in range(10)]
# demo missing data
documents.append({'order_id': 10, 'quantity': 5})
result = DataFrame.from_records(documents, index='order_id')
assert result.index.name == 'order_id'
# MultiIndex
result = DataFrame.from_records(documents,
index=['order_id', 'quantity'])
assert result.index.names == ('order_id', 'quantity')
def test_from_records_misc_brokenness(self):
# #2179
data = {1: ['foo'], 2: ['bar']}
result = DataFrame.from_records(data, columns=['a', 'b'])
exp = DataFrame(data, columns=['a', 'b'])
tm.assert_frame_equal(result, exp)
# overlap in index/index_names
data = {'a': [1, 2, 3], 'b': [4, 5, 6]}
result = DataFrame.from_records(data, index=['a', 'b', 'c'])
exp = DataFrame(data, index=['a', 'b', 'c'])
tm.assert_frame_equal(result, exp)
# GH 2623
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi']) # test col upconverts to obj
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts()
expected = Series({'datetime64[ns]': 1, 'object': 1})
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 1])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts().sort_index()
expected = Series({'datetime64[ns]': 1, 'int64': 1})
tm.assert_series_equal(results, expected)
def test_from_records_empty(self):
# 3562
result = DataFrame.from_records([], columns=['a', 'b', 'c'])
expected = DataFrame(columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
result = DataFrame.from_records([], columns=['a', 'b', 'b'])
expected = DataFrame(columns=['a', 'b', 'b'])
tm.assert_frame_equal(result, expected)
def test_from_records_empty_with_nonempty_fields_gh3682(self):
a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(a, index='id')
tm.assert_index_equal(df.index, Index([1], name='id'))
assert df.index.name == 'id'
tm.assert_index_equal(df.columns, Index(['value']))
b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(b, index='id')
tm.assert_index_equal(df.index, Index([], name='id'))
assert df.index.name == 'id'
def test_from_records_with_datetimes(self):
# this may fail on certain platforms because of a numpy issue
# related GH6140
if not is_platform_little_endian():
pytest.skip("known failure of test on non-little endian")
# construction with a null in a recarray
# GH 6140
expected = DataFrame({'EXPIRY': [datetime(2005, 3, 1, 0, 0), None]})
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[ns]')]
try:
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
        except ValueError:
pytest.skip("known failure of numpy rec array creation")
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
# coercion should work too
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[m]')]
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
def test_from_records_sequencelike(self):
df = DataFrame({'A': np.array(np.random.randn(6), dtype=np.float64),
'A1': np.array(np.random.randn(6), dtype=np.float64),
'B': np.array(np.arange(6), dtype=np.int64),
'C': ['foo'] * 6,
'D': np.array([True, False] * 3, dtype=bool),
'E': np.array(np.random.randn(6), dtype=np.float32),
'E1': np.array(np.random.randn(6), dtype=np.float32),
'F': np.array(np.arange(6), dtype=np.int32)})
        # it is actually tricky to create the record-like arrays and
        # keep the dtypes intact
blocks = df._to_dict_of_blocks()
tuples = []
columns = []
dtypes = []
for dtype, b in compat.iteritems(blocks):
columns.extend(b.columns)
dtypes.extend([(c, np.dtype(dtype).descr[0][1])
for c in b.columns])
for i in range(len(df.index)):
tup = []
for _, b in compat.iteritems(blocks):
tup.extend(b.iloc[i].values)
tuples.append(tuple(tup))
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
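        # Illustrative note (the exact type strings are an assumption, not
        # part of the original test): `dtypes` is a list of (name, typestr)
        # pairs such as [('A', '<f8'), ('B', '<i8'), ...], so viewing the
        # tuple array with that dtype as np.recarray yields named fields
        # whose dtypes match the original DataFrame blocks.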
recarray2 = df.to_records()
lists = [list(x) for x in tuples]
# tuples (lose the dtype info)
result = (DataFrame.from_records(tuples, columns=columns)
.reindex(columns=df.columns))
# created recarray and with to_records recarray (have dtype info)
result2 = (DataFrame.from_records(recarray, columns=columns)
.reindex(columns=df.columns))
result3 = (DataFrame.from_records(recarray2, columns=columns)
.reindex(columns=df.columns))
        # list of lists (no dtype info)
result4 = (DataFrame.from_records(lists, columns=columns)
.reindex(columns=df.columns))
tm.assert_frame_equal(result, df, check_dtype=False)
tm.assert_frame_equal(result2, df)
tm.assert_frame_equal(result3, df)
tm.assert_frame_equal(result4, df, check_dtype=False)
# tuples is in the order of the columns
result = DataFrame.from_records(tuples)
tm.assert_index_equal(result.columns, pd.Index(lrange(8)))
# test exclude parameter & we are casting the results here (as we don't
# have dtype info to recover)
columns_to_test = [columns.index('C'), columns.index('E1')]
exclude = list(set(range(8)) - set(columns_to_test))
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [columns[i] for i in sorted(columns_to_test)]
tm.assert_series_equal(result['C'], df['C'])
tm.assert_series_equal(result['E1'], df['E1'].astype('float64'))
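        # Hedged aside, not part of the original test: because the plain
        # tuples carry no field names, `exclude` is given as positional
        # labels here; only the two remaining positions ('C' and 'E1')
        # survive, and the float32 block comes back as float64 since the
        # tuples lost the original dtype information.  A minimal sketch:
        #   DataFrame.from_records([(1, 'a'), (2, 'b')], exclude=[0])
        #   # -> a single column labelled 1 holding ['a', 'b']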
# empty case
result = DataFrame.from_records([], columns=['foo', 'bar', 'baz'])
assert len(result) == 0
tm.assert_index_equal(result.columns,
pd.Index(['foo', 'bar', 'baz']))
result = DataFrame.from_records([])
assert len(result) == 0
assert len(result.columns) == 0
def test_from_records_dictlike(self):
# test the dict methods
df = DataFrame({'A': np.array(np.random.randn(6), dtype=np.float64),
'A1': np.array(np.random.randn(6), dtype=np.float64),
'B': np.array(np.arange(6), dtype=np.int64),
'C': ['foo'] * 6,
'D': np.array([True, False] * 3, dtype=bool),
'E': np.array(np.random.randn(6), dtype=np.float32),
'E1': np.array(np.random.randn(6), dtype=np.float32),
'F': np.array(np.arange(6), dtype=np.int32)})
# columns is in a different order here than the actual items iterated
# from the dict
blocks = df._to_dict_of_blocks()
columns = []
for dtype, b in compat.iteritems(blocks):
columns.extend(b.columns)
asdict = {x: y for x, y in compat.iteritems(df)}
asdict2 = {x: y.values for x, y in compat.iteritems(df)}
# dict of series & dict of ndarrays (have dtype info)
results = []
results.append(DataFrame.from_records(
asdict).reindex(columns=df.columns))
results.append(DataFrame.from_records(asdict, columns=columns)
.reindex(columns=df.columns))
results.append(DataFrame.from_records(asdict2, columns=columns)
.reindex(columns=df.columns))
for r in results:
tm.assert_frame_equal(r, df)
def test_from_records_with_index_data(self):
df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
data = np.random.randn(10)
df1 = DataFrame.from_records(df, index=data)
tm.assert_index_equal(df1.index, Index(data))
def test_from_records_bad_index_column(self):
df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
# should pass
df1 = DataFrame.from_records(df, index=['C'])
tm.assert_index_equal(df1.index, Index(df.C))
df1 = DataFrame.from_records(df, index='C')
tm.assert_index_equal(df1.index, Index(df.C))
# should fail
pytest.raises(ValueError, DataFrame.from_records, df, index=[2])
pytest.raises(KeyError, DataFrame.from_records, df, index=2)
def test_from_records_non_tuple(self):
class Record(object):
def __init__(self, *args):
self.args = args
def __getitem__(self, i):
return self.args[i]
def __iter__(self):
return iter(self.args)
recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
tups = lmap(tuple, recs)
result = DataFrame.from_records(recs)
expected = DataFrame.from_records(tups)
tm.assert_frame_equal(result, expected)
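        # Hedged note, not part of the original test: from_records only needs
        # each record to behave like a sequence, so any object implementing
        # __iter__/__getitem__ (like Record above) is accepted and produces
        # the same frame as the equivalent tuples, e.g.
        #   DataFrame.from_records([Record(1, 2)]) matches
        #   DataFrame.from_records([(1, 2)])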
def test_from_records_len0_with_columns(self):
# #2633
result = DataFrame.from_records([], index='foo',
columns=['foo', 'bar'])
expected = Index(['bar'])
assert len(result) == 0
assert result.index.name == 'foo'
tm.assert_index_equal(result.columns, expected)
def test_to_frame_with_falsey_names(self):
# GH 16114
result = Series(name=0).to_frame().dtypes
expected = Series({0: np.float64})
tm.assert_series_equal(result, expected)
result = DataFrame(Series(name=0)).dtypes
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, 'uint8', 'category'])
def test_constructor_range_dtype(self, dtype):
# GH 16804
expected = DataFrame({'A': [0, 1, 2, 3, 4]}, dtype=dtype or 'int64')
result = DataFrame({'A': range(5)}, dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_frame_from_list_subclass(self):
# GH21226
class List(list):
pass
expected = DataFrame([[1, 2, 3], [4, 5, 6]])
result = DataFrame(List([List([1, 2, 3]), List([4, 5, 6])]))
tm.assert_frame_equal(result, expected)
class TestDataFrameConstructorWithDatetimeTZ(TestData):
def test_from_dict(self):
# 8260
# support datetime64 with tz
idx = Index(date_range('20130101', periods=3, tz='US/Eastern'),
name='foo')
dr = date_range('20130110', periods=3)
# construction
df = DataFrame({'A': idx, 'B': dr})
        assert df['A'].dtype == 'datetime64[ns, US/Eastern]'
assert df['A'].name == 'A'
tm.assert_series_equal(df['A'], Series(idx, name='A'))
tm.assert_series_equal(df['B'], Series(dr, name='B'))
def test_from_index(self):
# from index
idx2 = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
df2 = DataFrame(idx2)
tm.assert_series_equal(df2['foo'], Series(idx2, name='foo'))
df2 = DataFrame(Series(idx2))
tm.assert_series_equal(df2['foo'], Series(idx2, name='foo'))
idx2 = date_range('20130101', periods=3, tz='US/Eastern')
df2 = DataFrame(idx2)
tm.assert_series_equal(df2[0], Series(idx2, name=0))
df2 = DataFrame(Series(idx2))
tm.assert_series_equal(df2[0], Series(idx2, name=0))
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
assert d['B'].isna().all()
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
        assert result['index'].dtype == 'M8[ns]'
result = df.to_records(index=False)
def test_frame_timeseries_column(self):
# GH19157
dr = date_range(start='20130101T10:00:00', periods=3, freq='T',
tz='US/Eastern')
result = DataFrame(dr, columns=['timestamps'])
expected = DataFrame({'timestamps': [
Timestamp('20130101T10:00:00', tz='US/Eastern'),
Timestamp('20130101T10:01:00', tz='US/Eastern'),
Timestamp('20130101T10:02:00', tz='US/Eastern')]})
tm.assert_frame_equal(result, expected)
def test_nested_dict_construction(self):
# GH22227
columns = ['Nevada', 'Ohio']
pop = {'Nevada': {2001: 2.4, 2002: 2.9},
'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}}
result = pd.DataFrame(pop, index=[2001, 2002, 2003], columns=columns)
expected = pd.DataFrame(
[(2.4, 1.7), (2.9, 3.6), (np.nan, np.nan)],
columns=columns,
            index=pd.Index([2001, 2002, 2003]))
        tm.assert_frame_equal(result, expected)
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
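                # Hedged aside, not part of the original test: idx2 contains
                # NaT entries, yet min/max/argmin/argmax above give the same
                # answers as for idx1 because missing values are skipped, e.g.
                #   DatetimeIndex(['2011-01-02', pd.NaT, '2011-01-01']).min()
                #   # -> Timestamp('2011-01-01 00:00:00')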
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
        # return subtraction -> TimedeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
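            # Illustrative sketch, not part of the original test: np.repeat
            # with a sequence of counts repeats each element a different
            # number of times, e.g.
            #   np.repeat(['a', 'b', 'c'], [1, 2, 3])
            #   # -> ['a', 'b', 'b', 'c', 'c', 'c']
            # which is what produces the 1..10 value counts checked below.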
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
            idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                    '2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
            exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                    '2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
            exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                    '2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
            self.assertEqual(idx.min(), Timedelta('1 days'))
            self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
        with tm.assertRaisesRegexp(TypeError, msg):
            idx - Timestamp('2011-01-01')
import os
import zipfile
import csv
import pandas as pd
import requests
import json
from itertools import islice
import sklearn.preprocessing
from lightfm.data import Dataset
import numpy as np
from lightfm import LightFM, lightfm
from lightfm.evaluation import auc_score
from scipy import sparse
from sklearn.metrics.pairwise import cosine_similarity
# *********************************************************************
def create_item_dict(df, id_col, name_col, author_col):
    '''
    Function to create an item dictionary based on item id, item name and author
    Required Input -
        - df = Pandas dataframe with item information
        - id_col = Column name containing unique identifier for an item
        - name_col = Column name containing name of the item
        - author_col = Column name containing author of the item
    Expected Output -
        item_dict = Dictionary with item_id as key and "item name : author" as value
    '''
item_dict = {}
for i in range(df.shape[0]):
item_dict[(df.loc[i, id_col])] = df.loc[i, name_col] +' : '+df.loc[i, author_col]
return item_dict
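# Illustrative sketch (not part of the original module): how create_item_dict might
# be used on a tiny catalogue. The column names 'book_id', 'title' and 'author' are
# assumptions for this example only.
def _demo_create_item_dict():
    books = pd.DataFrame({'book_id': [1, 2],
                          'title': ['Dune', 'Emma'],
                          'author': ['Frank Herbert', 'Jane Austen']})
    item_dict = create_item_dict(books, id_col='book_id', name_col='title',
                                 author_col='author')
    # item_dict == {1: 'Dune : Frank Herbert', 2: 'Emma : Jane Austen'}
    return item_dict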
# *****************************************************************************************
def create_interaction_matrix(df, user_col, item_col, rating_col, norm=False, threshold=None):
'''
Function to create an interaction matrix dataframe from transactional type interactions
Required Input -
- df = Pandas DataFrame containing user-item interactions
- user_col = column name containing user's identifier
- item_col = column name containing item's identifier
        - rating_col = column name containing user feedback on interaction with a given item
- norm (optional) = True if a normalization of ratings is needed
- threshold (required if norm = True) = value above which the rating is favorable
Expected output -
- Pandas dataframe with user-item interactions ready to be fed in a recommendation algorithm
'''
interactions = df.groupby([user_col, item_col])[rating_col] \
.sum().unstack().reset_index(). \
fillna(0).set_index(user_col)
if norm:
interactions = interactions.applymap(lambda x: 1 if x > threshold else 0)
return interactions
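# Illustrative sketch: building an interaction matrix from a toy ratings frame.
# The column names ('user_id', 'book_id', 'rating') are assumptions for the example.
def _demo_create_interaction_matrix():
    ratings = pd.DataFrame({'user_id': [10, 10, 11],
                            'book_id': [1, 2, 1],
                            'rating': [5, 3, 4]})
    # Rows become users, columns become items; with norm=True each cell is 1
    # where the summed rating exceeds the threshold and 0 otherwise.
    return create_interaction_matrix(ratings, user_col='user_id',
                                     item_col='book_id', rating_col='rating',
                                     norm=True, threshold=3)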
# ************************************************************************************
def create_item_emdedding_distance_matrix(model, interactions):
'''
Function to create item-item distance embedding matrix
Required Input -
- model = Trained matrix factorization model
- interactions = dataset used for training the model
Expected Output -
- item_emdedding_distance_matrix = Pandas dataframe containing cosine distance matrix b/w items
'''
df_item_norm_sparse = sparse.csr_matrix(model.item_embeddings)
similarities = cosine_similarity(df_item_norm_sparse)
item_emdedding_distance_matrix = pd.DataFrame(similarities)
item_emdedding_distance_matrix.columns = interactions.columns
item_emdedding_distance_matrix.index = interactions.columns
return item_emdedding_distance_matrix
# *****************************************************************************
def item_item_recommendation(item_emdedding_distance_matrix, item_id,
item_dict, n_items=10, show=True):
'''
Function to create item-item recommendation
Required Input -
- item_emdedding_distance_matrix = Pandas dataframe containing cosine distance matrix b/w items
- item_id = item ID for which we need to generate recommended items
- item_dict = Dictionary type input containing item_id as key and item_name as value
        - n_items = Number of items needed as an output
        - show = True to print the item of interest and its recommendations
Expected Output -
- recommended_items = List of recommended items
'''
recommended_items = list(pd.Series(item_emdedding_distance_matrix.loc[item_id, :]. \
sort_values(ascending=False).head(n_items + 1). \
index[1:n_items + 1]))
    if show:
print("Item of interest :{0}".format(item_dict[item_id]))
print("Item similar to the above item:")
counter = 1
for i in recommended_items:
print(str(counter) + '- ' + item_dict[i])
counter += 1
return recommended_items
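# Illustrative end-to-end sketch tying the helpers above together: train a LightFM
# model on the interaction matrix, build the item-item cosine matrix and query it.
# The dataframes and their column names are assumptions; real data loading lives elsewhere.
def _demo_item_item_pipeline(ratings, books):
    interactions = create_interaction_matrix(ratings, user_col='user_id',
                                             item_col='book_id',
                                             rating_col='rating')
    item_dict = create_item_dict(books, id_col='book_id', name_col='title',
                                 author_col='author')
    model = LightFM(no_components=30, loss='warp')
    model.fit(sparse.csr_matrix(interactions.values), epochs=10, num_threads=2)
    distance_matrix = create_item_emdedding_distance_matrix(model, interactions)
    # Recommend 5 items similar to the first item of the catalogue.
    first_item = interactions.columns[0]
    return item_item_recommendation(distance_matrix, first_item, item_dict,
                                    n_items=5, show=False)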
from __future__ import unicode_literals
import csv
import pandas as pd
import numpy
import sys
import os
from gtfspy.gtfs import GTFS
from gtfspy.util import wgs84_distance
def get_spatial_bounds(gtfs, as_dict=False):
"""
Parameters
----------
gtfs
Returns
-------
min_lon: float
max_lon: float
min_lat: float
max_lat: float
"""
stats = get_stats(gtfs)
lon_min = stats['lon_min']
lon_max = stats['lon_max']
lat_min = stats['lat_min']
lat_max = stats['lat_max']
if as_dict:
return {'lon_min': lon_min, 'lon_max': lon_max, 'lat_min': lat_min, 'lat_max': lat_max}
else:
return lon_min, lon_max, lat_min, lat_max
def get_percentile_stop_bounds(gtfs, percentile):
stops = gtfs.get_table("stops")
percentile = min(percentile, 100 - percentile)
lat_min = numpy.percentile(stops['lat'].values, percentile)
lat_max = numpy.percentile(stops['lat'].values, 100 - percentile)
lon_min = numpy.percentile(stops['lon'].values, percentile)
lon_max = numpy.percentile(stops['lon'].values, 100 - percentile)
return lon_min, lon_max, lat_min, lat_max
def get_median_lat_lon_of_stops(gtfs):
"""
    Get the median latitude and longitude of stops
Parameters
----------
gtfs: GTFS
Returns
-------
median_lat : float
median_lon : float
"""
stops = gtfs.get_table("stops")
median_lat = numpy.percentile(stops['lat'].values, 50)
median_lon = numpy.percentile(stops['lon'].values, 50)
return median_lat, median_lon
def get_centroid_of_stops(gtfs):
"""
    Get the mean latitude and longitude of stops
Parameters
----------
gtfs: GTFS
Returns
-------
mean_lat : float
mean_lon : float
"""
stops = gtfs.get_table("stops")
mean_lat = numpy.mean(stops['lat'].values)
mean_lon = numpy.mean(stops['lon'].values)
return mean_lat, mean_lon
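# Illustrative sketch of the spatial helpers above; 'data/helsinki.sqlite' is a
# hypothetical path to an already imported GTFS sqlite database.
def _demo_spatial_summary(gtfs_path='data/helsinki.sqlite'):
    gtfs = GTFS(gtfs_path)
    lon_min, lon_max, lat_min, lat_max = get_spatial_bounds(gtfs)
    median_lat, median_lon = get_median_lat_lon_of_stops(gtfs)
    mean_lat, mean_lon = get_centroid_of_stops(gtfs)
    return {'bounds': (lon_min, lon_max, lat_min, lat_max),
            'median_stop': (median_lat, median_lon),
            'centroid': (mean_lat, mean_lon)}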
def write_stats_as_csv(gtfs, path_to_csv, re_write=False):
"""
Writes data from get_stats to csv file
Parameters
----------
gtfs: GTFS
path_to_csv: str
filepath to the csv file to be generated
re_write:
        instead of appending, create a new file.
"""
stats_dict = get_stats(gtfs)
    # remove any existing file first if a full re-write was requested
    if re_write and os.path.exists(path_to_csv):
        os.remove(path_to_csv)
#if not os.path.isfile(path_to_csv):
# is_new = True
#else:
# is_new = False
is_new = True
mode = 'r' if os.path.exists(path_to_csv) else 'w+'
with open(path_to_csv, mode) as csvfile:
for line in csvfile:
if line:
is_new = False
else:
is_new = True
with open(path_to_csv, 'a') as csvfile:
if (sys.version_info > (3, 0)):
delimiter = u","
else:
delimiter = b","
statswriter = csv.writer(csvfile, delimiter=delimiter)
# write column names if
if is_new:
statswriter.writerow([key for key in sorted(stats_dict.keys())])
row_to_write = []
# write stats row sorted by column name
for key in sorted(stats_dict.keys()):
row_to_write.append(stats_dict[key])
statswriter.writerow(row_to_write)
def get_stats(gtfs):
"""
Get basic statistics of the GTFS data.
Parameters
----------
gtfs: GTFS
Returns
-------
stats: dict
A dictionary of various statistics.
Keys should be strings, values should be inputtable to a database (int, date, str, ...)
(but not a list)
"""
stats = {}
# Basic table counts
for table in ['agencies', 'routes', 'stops', 'stop_times', 'trips', 'calendar', 'shapes', 'calendar_dates',
'days', 'stop_distances', 'frequencies', 'feed_info', 'transfers']:
stats["n_" + table] = gtfs.get_row_count(table)
# Agency names
agencies = gtfs.get_table("agencies")
stats["agencies"] = "_".join(agencies['name'].values)
# Stop lat/lon range
stops = gtfs.get_table("stops")
lats = stops['lat'].values
lons = stops['lon'].values
percentiles = [0, 10, 50, 90, 100]
try:
lat_percentiles = numpy.percentile(lats, percentiles)
except IndexError:
lat_percentiles = [None] * 5
lat_min, lat_10, lat_median, lat_90, lat_max = lat_percentiles
stats["lat_min"] = lat_min
stats["lat_10"] = lat_10
stats["lat_median"] = lat_median
stats["lat_90"] = lat_90
stats["lat_max"] = lat_max
try:
lon_percentiles = numpy.percentile(lons, percentiles)
except IndexError:
lon_percentiles = [None] * 5
lon_min, lon_10, lon_median, lon_90, lon_max = lon_percentiles
stats["lon_min"] = lon_min
stats["lon_10"] = lon_10
stats["lon_median"] = lon_median
stats["lon_90"] = lon_90
stats["lon_max"] = lon_max
if len(lats) > 0:
stats["height_km"] = wgs84_distance(lat_min, lon_median, lat_max, lon_median) / 1000.
stats["width_km"] = wgs84_distance(lon_min, lat_median, lon_max, lat_median) / 1000.
else:
stats["height_km"] = None
stats["width_km"] = None
first_day_start_ut, last_day_start_ut = gtfs.get_day_start_ut_span()
stats["start_time_ut"] = first_day_start_ut
if last_day_start_ut is None:
stats["end_time_ut"] = None
else:
        # 28 (instead of 24) comes from the GTFS standard
stats["end_time_ut"] = last_day_start_ut + 28 * 3600
stats["start_date"] = gtfs.get_min_date()
stats["end_date"] = gtfs.get_max_date()
# Maximum activity day
max_activity_date = gtfs.execute_custom_query(
'SELECT count(*), date '
'FROM days '
'GROUP BY date '
'ORDER BY count(*) DESC, date '
'LIMIT 1;').fetchone()
if max_activity_date:
stats["max_activity_date"] = max_activity_date[1]
max_activity_hour = gtfs.get_cursor().execute(
'SELECT count(*), arr_time_hour FROM day_stop_times '
'WHERE date=? GROUP BY arr_time_hour '
'ORDER BY count(*) DESC;', (stats["max_activity_date"],)).fetchone()
if max_activity_hour:
stats["max_activity_hour"] = max_activity_hour[1]
else:
stats["max_activity_hour"] = None
# Fleet size estimate: considering each line separately
if max_activity_date and max_activity_hour:
fleet_size_estimates = _fleet_size_estimate(gtfs, stats['max_activity_hour'], stats['max_activity_date'])
stats.update(fleet_size_estimates)
# Compute simple distributions of various columns that have a finite range of values.
    # Commented lines refer to values that are not imported yet.
stats['routes__type__dist'] = _distribution(gtfs, 'routes', 'type')
# stats['stop_times__pickup_type__dist'] = _distribution(gtfs, 'stop_times', 'pickup_type')
# stats['stop_times__drop_off_type__dist'] = _distribution(gtfs, 'stop_times', 'drop_off_type')
# stats['stop_times__timepoint__dist'] = _distribution(gtfs, 'stop_times', 'timepoint')
stats['calendar_dates__exception_type__dist'] = _distribution(gtfs, 'calendar_dates', 'exception_type')
stats['frequencies__exact_times__dist'] = _distribution(gtfs, 'frequencies', 'exact_times')
stats['transfers__transfer_type__dist'] = _distribution(gtfs, 'transfers', 'transfer_type')
stats['agencies__lang__dist'] = _distribution(gtfs, 'agencies', 'lang')
stats['stops__location_type__dist'] = _distribution(gtfs, 'stops', 'location_type')
# stats['stops__wheelchair_boarding__dist'] = _distribution(gtfs, 'stops', 'wheelchair_boarding')
# stats['trips__wheelchair_accessible__dist'] = _distribution(gtfs, 'trips', 'wheelchair_accessible')
# stats['trips__bikes_allowed__dist'] = _distribution(gtfs, 'trips', 'bikes_allowed')
# stats[''] = _distribution(gtfs, '', '')
stats = _feed_calendar_span(gtfs, stats)
return stats
def _distribution(gtfs, table, column):
"""Count occurrences of values AND return it as a string.
Example return value: '1:5 2:15'"""
cur = gtfs.conn.cursor()
cur.execute('SELECT {column}, count(*) '
'FROM {table} GROUP BY {column} '
'ORDER BY {column}'.format(column=column, table=table))
return ' '.join('%s:%s' % (t, c) for t, c in cur)
def _fleet_size_estimate(gtfs, hour, date):
"""
Calculates fleet size estimates by two separate formula:
1. Considering all routes separately with no interlining and doing a deficit calculation at every terminal
2. By looking at the maximum number of vehicles in simultaneous movement
Parameters
----------
gtfs: GTFS
hour: int
    date: str
Returns
-------
results: dict
a dict with keys:
fleet_size_route_based
fleet_size_max_movement
"""
results = {}
fleet_size_list = []
cur = gtfs.conn.cursor()
rows = cur.execute(
'SELECT type, max(vehicles) '
'FROM ('
'SELECT type, direction_id, sum(vehicles) as vehicles '
'FROM '
'('
'SELECT trips.route_I, trips.direction_id, routes.route_id, name, type, count(*) as vehicles, cycle_time_min '
'FROM trips, routes, days, '
'('
'SELECT first_trip.route_I, first_trip.direction_id, first_trip_start_time, first_trip_end_time, '
'MIN(start_time_ds) as return_trip_start_time, end_time_ds as return_trip_end_time, '
'(end_time_ds - first_trip_start_time)/60 as cycle_time_min '
'FROM '
'trips, '
'(SELECT route_I, direction_id, MIN(start_time_ds) as first_trip_start_time, '
'end_time_ds as first_trip_end_time '
'FROM trips, days '
'WHERE trips.trip_I=days.trip_I AND start_time_ds >= ? * 3600 '
'AND start_time_ds <= (? + 1) * 3600 AND date = ? '
'GROUP BY route_I, direction_id) first_trip '
'WHERE first_trip.route_I = trips.route_I '
'AND first_trip.direction_id != trips.direction_id '
'AND start_time_ds >= first_trip_end_time '
'GROUP BY trips.route_I, trips.direction_id'
') return_trip '
'WHERE trips.trip_I=days.trip_I AND trips.route_I= routes.route_I '
'AND date = ? AND trips.route_I = return_trip.route_I '
'AND trips.direction_id = return_trip.direction_id '
'AND start_time_ds >= first_trip_start_time '
'AND start_time_ds < return_trip_end_time '
'GROUP BY trips.route_I, trips.direction_id '
'ORDER BY type, name, vehicles desc'
') cycle_times '
'GROUP BY direction_id, type'
') vehicles_type '
'GROUP BY type;', (hour, hour, date, date))
for row in rows:
fleet_size_list.append(str(row[0]) + ':' + str(row[1]))
results['fleet_size_route_based'] = " ".join(fleet_size_list)
# Fleet size estimate: maximum number of vehicles in movement
fleet_size_list = []
fleet_size_dict = {}
if hour:
for minute in range(hour * 3600, (hour + 1) * 3600, 60):
rows = gtfs.conn.cursor().execute(
'SELECT type, count(*) '
'FROM trips, routes, days '
'WHERE trips.route_I = routes.route_I '
'AND trips.trip_I=days.trip_I '
'AND start_time_ds <= ? '
'AND end_time_ds > ? + 60 '
'AND date = ? '
'GROUP BY type;',
(minute, minute, date))
for row in rows:
if fleet_size_dict.get(row[0], 0) < row[1]:
fleet_size_dict[row[0]] = row[1]
for key in fleet_size_dict.keys():
fleet_size_list.append(str(key) + ':' + str(fleet_size_dict[key]))
results["fleet_size_max_movement"] = ' '.join(fleet_size_list)
return results
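# Illustrative sketch of calling the fleet size estimate directly; the hour and the
# date string are assumptions (get_stats normally picks the busiest hour itself).
def _demo_fleet_size(gtfs):
    estimates = _fleet_size_estimate(gtfs, hour=8, date='2016-01-15')
    # Both values are strings of "route_type:count" pairs, e.g. '0:12 3:85'
    # (these numbers are made up for illustration).
    return estimates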
def _n_gtfs_sources(gtfs):
n_gtfs_sources = gtfs.execute_custom_query(
"SELECT value FROM metadata WHERE key = 'n_gtfs_sources';").fetchone()
if not n_gtfs_sources:
n_gtfs_sources = [1]
return n_gtfs_sources
def _feed_calendar_span(gtfs, stats):
"""
Computes the temporal coverage of each source feed
Parameters
----------
gtfs: gtfspy.GTFS object
stats: dict
where to append the stats
Returns
-------
stats: dict
"""
n_feeds = _n_gtfs_sources(gtfs)[0]
max_start = None
min_end = None
if n_feeds > 1:
for i in range(n_feeds):
feed_key = "feed_" + str(i) + "_"
start_key = feed_key + "calendar_start"
end_key = feed_key + "calendar_end"
calendar_span = gtfs.conn.cursor().execute(
'SELECT min(date), max(date) FROM trips, days '
'WHERE trips.trip_I = days.trip_I AND trip_id LIKE ?;', (feed_key + '%',)).fetchone()
stats[start_key] = calendar_span[0]
stats[end_key] = calendar_span[1]
if calendar_span[0] is not None and calendar_span[1] is not None:
if not max_start and not min_end:
max_start = calendar_span[0]
min_end = calendar_span[1]
else:
if gtfs.get_day_start_ut(calendar_span[0]) > gtfs.get_day_start_ut(max_start):
max_start = calendar_span[0]
if gtfs.get_day_start_ut(calendar_span[1]) < gtfs.get_day_start_ut(min_end):
min_end = calendar_span[1]
stats["latest_feed_start_date"] = max_start
stats["earliest_feed_end_date"] = min_end
else:
stats["latest_feed_start_date"] = stats["start_date"]
stats["earliest_feed_end_date"] = stats["end_date"]
return stats
def update_stats(gtfs):
"""
    Computes stats and stores them in the underlying gtfs object (i.e. the database).
Parameters
----------
gtfs: GTFS
"""
stats = get_stats(gtfs)
gtfs.update_stats(stats)
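# Illustrative sketch of the stats workflow: compute the stats, persist them in the
# gtfs database and append them to a csv. Both paths below are hypothetical.
def _demo_stats_pipeline(gtfs_path='data/helsinki.sqlite',
                         csv_path='data/helsinki_stats.csv'):
    gtfs = GTFS(gtfs_path)
    stats = get_stats(gtfs)
    # e.g. stats['n_stops'], stats['start_date'] and stats['end_date'] summarise the feed
    update_stats(gtfs)                   # stores the stats inside the gtfs database
    write_stats_as_csv(gtfs, csv_path)   # appends one row of stats to the csv
    return stats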
def trip_stats(gtfs, results_by_mode=False):
"""
Parameters
----------
gtfs: GTFS
results_by_mode: bool
Returns
-------
if results_by_mode is False:
q_result: pandas.DataFrame
if results_by_mode is True:
q_results: dict
a dict with the following keys:
[ADD HERE]
"""
conn = gtfs.conn
conn.create_function("find_distance", 4, wgs84_distance)
cur = conn.cursor()
# this query calculates the distance and travel time for each complete trip
# stop_data_df = pd.read_sql_query(query, self.conn, params=params)
query = 'SELECT ' \
'startstop.trip_I AS trip_I, ' \
'type, ' \
'sum(CAST(find_distance(startstop.lat, startstop.lon, endstop.lat, endstop.lon) AS INT)) as total_distance, ' \
'sum(endstop.arr_time_ds - startstop.arr_time_ds) as total_traveltime ' \
'FROM ' \
'(SELECT * FROM stop_times, stops WHERE stop_times.stop_I = stops.stop_I) startstop, ' \
'(SELECT * FROM stop_times, stops WHERE stop_times.stop_I = stops.stop_I) endstop, ' \
'trips, ' \
'routes ' \
'WHERE ' \
'startstop.trip_I = endstop.trip_I ' \
'AND startstop.seq + 1 = endstop.seq ' \
'AND startstop.trip_I = trips.trip_I ' \
'AND trips.route_I = routes.route_I ' \
'GROUP BY startstop.trip_I'
    q_result = pd.read_sql_query(query, conn)
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
            'dave@google.com',
            'tdhock5@gmail.com',
            'maudelaperriere@gmail.com',
            'rob@gmail.com some text steve@gmail.com',
            'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
        # an Index should return the same result as the default index without
        # a name; thus index.name doesn't affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
        # 0xFF13: ３ FULLWIDTH DIGIT THREE
        values = ['A', '3', u'¼', u'★', u'፸', u'３', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
        unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'３', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
        values = ['A', np.nan, u'¼', u'★', np.nan, u'３', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
        # If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
        s = Series(['Wes McKinney', 'Travis  Oliphant'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
        # "no limit" sentinels: re.split uses maxsplit=0, str.split uses -1
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA
), ('one', 'of', 'these', 'things',
'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True, n=1)
exp = MultiIndex.from_tuples([('some_equal', 'splits'),
('with_no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(self):
# gh-18450
s = Series(["foo,bar,baz", NA])
result = s.str.split(",", expand=True)
exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]])
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
assert all(np.isnan(x) for x in result.iloc[1])
def test_split_with_name(self):
# GH 12617
# should preserve name
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.split(',')
exp = Series([['a', 'b'], ['c', 'd']], name='xxx')
tm.assert_series_equal(res, exp)
res = s.str.split(',', expand=True)
exp = DataFrame([['a', 'b'], ['c', 'd']])
tm.assert_frame_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.split(',')
exp = Index([['a', 'b'], ['c', 'd']], name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(',', expand=True)
exp = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([('a', '_', 'b_c'), ('c', '_', 'd_e'), NA,
('f', '_', 'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('a_b', '_', 'c'), ('c_d', '_', 'e'), NA,
('f_g', '_', 'h')])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.partition('__', expand=False)
exp = Series([('a', '__', 'b__c'), ('c', '__', 'd__e'), NA,
('f', '__', 'g__h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('__', expand=False)
exp = Series([('a__b', '__', 'c'), ('c__d', '__', 'e'), NA,
('f__g', '__', 'h')])
tm.assert_series_equal(result, exp)
# None
values = Series(['a b c', 'c d e', NA, 'f g h'])
result = values.str.partition(expand=False)
exp = Series([('a', ' ', 'b c'), ('c', ' ', 'd e'), NA,
('f', ' ', 'g h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition(expand=False)
exp = Series([('a b', ' ', 'c'), ('c d', ' ', 'e'), NA,
('f g', ' ', 'h')])
tm.assert_series_equal(result, exp)
        # Not split
values = Series(['abc', 'cde', NA, 'fgh'])
result = values.str.partition('_', expand=False)
exp = Series([('abc', '', ''), ('cde', '', ''), NA, ('fgh', '', '')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('', '', 'abc'), ('', '', 'cde'), NA, ('', '', 'fgh')])
tm.assert_series_equal(result, exp)
# unicode
values = Series([u'a_b_c', u'c_d_e', NA, u'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([(u'a', u'_', u'b_c'), (u'c', u'_', u'd_e'),
NA, (u'f', u'_', u'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([(u'a_b', u'_', u'c'), (u'c_d', u'_', u'e'),
NA, (u'f_g', u'_', u'h')])
tm.assert_series_equal(result, exp)
# compare to standard lib
values = Series(['A_B_C', 'B_C_D', 'E_F_G', 'EFGHEF'])
result = values.str.partition('_', expand=False).tolist()
assert result == [v.partition('_') for v in values]
result = values.str.rpartition('_', expand=False).tolist()
assert result == [v.rpartition('_') for v in values]
def test_partition_index(self):
values = Index(['a_b_c', 'c_d_e', 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Index(np.array([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_',
'g_h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition('_', expand=False)
exp = Index(np.array([('a_b', '_', 'c'), ('c_d', '_', 'e'), (
'f_g', '_', 'h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition('_')
exp = Index([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition('_')
exp = Index([('a_b', '_', 'c'), ('c_d', '_', 'e'), ('f_g', '_', 'h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_')
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_')
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=True)
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_', expand=True)
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
def test_partition_with_name(self):
# GH 12617
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.partition(',')
exp = DataFrame({0: ['a', 'c'], 1: [',', ','], 2: ['b', 'd']})
tm.assert_frame_equal(res, exp)
# should preserve name
res = s.str.partition(',', expand=False)
exp = Series([('a', ',', 'b'), ('c', ',', 'd')], name='xxx')
tm.assert_series_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.partition(',')
exp = MultiIndex.from_tuples([('a', ',', 'b'), ('c', ',', 'd')])
assert res.nlevels == 3
tm.assert_index_equal(res, exp)
# should preserve name
res = idx.str.partition(',', expand=False)
exp = Index(np.array([('a', ',', 'b'), ('c', ',', 'd')]), name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
def test_pipe_failures(self):
# #2119
s = Series(['A|B|C'])
result = s.str.split('|')
exp = Series([['A', 'B', 'C']])
tm.assert_series_equal(result, exp)
result = s.str.replace('|', ' ')
exp = Series(['A B C'])
tm.assert_series_equal(result, exp)
def test_slice(self):
values = Series(['aafootwo', 'aabartwo', NA, 'aabazqux'])
result = values.str.slice(2, 5)
exp = Series(['foo', 'bar', NA, 'baz'])
tm.assert_series_equal(result, exp)
for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2),
(3, 0, -1)]:
try:
result = values.str.slice(start, stop, step)
expected = Series([s[start:stop:step] if not isna(s) else NA
for s in values])
tm.assert_series_equal(result, expected)
            except Exception:
print('failed on %s:%s:%s' % (start, stop, step))
raise
# mixed
mixed = Series(['aafootwo', NA, 'aabartwo', True, datetime.today(),
None, 1, 2.])
rs = Series(mixed).str.slice(2, 5)
xp = Series(['foo', NA, 'bar', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.slice(2, 5, -1)
xp = Series(['oof', NA, 'rab', NA, NA, NA, NA, NA])
# unicode
values = Series([u('aafootwo'), u('aabartwo'), NA, u('aabazqux')])
result = values.str.slice(2, 5)
exp = Series([u('foo'), u('bar'), NA, u('baz')])
tm.assert_series_equal(result, exp)
result = values.str.slice(0, -1, 2)
exp = Series([u('afow'), u('abrw'), NA, u('abzu')])
tm.assert_series_equal(result, exp)
def test_slice_replace(self):
values = Series(['short', 'a bit longer', 'evenlongerthanthat', '', NA
])
exp = Series(['shrt', 'a it longer', 'evnlongerthanthat', '', NA])
result = values.str.slice_replace(2, 3)
tm.assert_series_equal(result, exp)
exp = Series(['shzrt', 'a zit longer', 'evznlongerthanthat', 'z', NA])
result = values.str.slice_replace(2, 3, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 1, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shorz', 'a bit longez', 'evenlongerthanthaz', 'z', NA])
result = values.str.slice_replace(-1, None, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'zer', 'zat', 'z', NA])
result = values.str.slice_replace(None, -2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shortz', 'a bit znger', 'evenlozerthanthat', 'z', NA])
result = values.str.slice_replace(6, 8, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'a zit longer', 'evenlongzerthanthat', 'z', NA])
result = values.str.slice_replace(-10, 3, 'z')
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip(self):
values = Series([' aa ', ' bb \n', NA, 'cc '])
result = values.str.strip()
exp = Series(['aa', 'bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series(['aa ', 'bb \n', NA, 'cc '])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([' aa', ' bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_mixed(self):
# mixed
mixed = Series([' aa ', NA, ' bb \t\n', True, datetime.today(), None,
1, 2.])
rs = Series(mixed).str.strip()
xp = Series(['aa', NA, 'bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.lstrip()
xp = Series(['aa ', NA, 'bb \t\n', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rstrip()
xp = Series([' aa', NA, ' bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
def test_strip_lstrip_rstrip_unicode(self):
# unicode
values = Series([u(' aa '), u(' bb \n'), NA, u('cc ')])
result = values.str.strip()
exp = Series([u('aa'), u('bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series([u('aa '), u('bb \n'), NA, u('cc ')])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([u(' aa'), u(' bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_args(self):
values = Series(['xxABCxx', 'xx BNSD', 'LDFJH xx'])
rs = values.str.strip('x')
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip('x')
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip('x')
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_strip_lstrip_rstrip_args_unicode(self):
values = Series([u('xxABCxx'), u('xx BNSD'), u('LDFJH xx')])
rs = values.str.strip(u('x'))
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip(u('x'))
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip(u('x'))
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_wrap(self):
# test values are: two words less than width, two words equal to width,
# two words greater than width, one word less than width, one word
# equal to width, one word greater than width, multiple tokens with
# trailing whitespace equal to width
values = Series([u('hello world'), u('hello world!'), u(
'hello world!!'), u('abcdefabcde'), u('abcdefabcdef'), u(
'abcdefabcdefa'), u('ab ab ab ab '), u('ab ab ab ab a'), u(
'\t')])
# expected values
xp = Series([u('hello world'), u('hello world!'), u('hello\nworld!!'),
u('abcdefabcde'), u('abcdefabcdef'), u('abcdefabcdef\na'),
u('ab ab ab ab'), u('ab ab ab ab\na'), u('')])
rs = values.str.wrap(12, break_long_words=True)
assert_series_equal(rs, xp)
# test with pre and post whitespace (non-unicode), NaN, and non-ascii
# Unicode
values = Series([' pre ', np.nan, u('\xac\u20ac\U00008000 abadcafe')
])
xp = Series([' pre', NA, u('\xac\u20ac\U00008000 ab\nadcafe')])
rs = values.str.wrap(6)
assert_series_equal(rs, xp)
def test_get(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.get(1)
expected = Series(['b', 'd', np.nan, 'g'])
tm.assert_series_equal(result, expected)
# mixed
mixed = Series(['a_b_c', NA, 'c_d_e', True, datetime.today(), None, 1,
2.])
rs = Series(mixed).str.split('_').str.get(1)
xp = Series(['b', NA, 'd', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.get(1)
expected = Series([u('b'), u('d'), np.nan, u('g')])
tm.assert_series_equal(result, expected)
# bounds testing
values = Series(['1_2_3_4_5', '6_7_8_9_10', '11_12'])
# positive index
result = values.str.split('_').str.get(2)
expected = Series(['3', '8', np.nan])
tm.assert_series_equal(result, expected)
# negative index
result = values.str.split('_').str.get(-3)
expected = Series(['3', '8', np.nan])
tm.assert_series_equal(result, expected)
def test_more_contains(self):
# PR #1179
s = Series(['A', 'B', 'C', 'Aaba', 'Baca', '', NA,
'CABA', 'dog', 'cat'])
result = s.str.contains('a')
expected = Series([False, False, False, True, True, False, np.nan,
False, False, True])
assert_series_equal(result, expected)
result = s.str.contains('a', case=False)
expected = Series([True, False, False, True, True, False, np.nan, True,
False, True])
assert_series_equal(result, expected)
result = s.str.contains('Aa')
expected = Series([False, False, False, True, False, False, np.nan,
False, False, False])
assert_series_equal(result, expected)
result = s.str.contains('ba')
expected = Series([False, False, False, True, False, False, np.nan,
False, False, False])
assert_series_equal(result, expected)
result = s.str.contains('ba', case=False)
expected = Series([False, False, False, True, True, False, np.nan,
True, False, False])
assert_series_equal(result, expected)
def test_contains_nan(self):
# PR #14171
s = Series([np.nan, np.nan, np.nan], dtype=np.object_)
result = s.str.contains('foo', na=False)
expected = Series([False, False, False], dtype=np.bool_)
assert_series_equal(result, expected)
result = s.str.contains('foo', na=True)
expected = Series([True, True, True], dtype=np.bool_)
assert_series_equal(result, expected)
result = s.str.contains('foo', na="foo")
expected = Series(["foo", "foo", "foo"], dtype=np.object_)
assert_series_equal(result, expected)
result = s.str.contains('foo')
expected = Series([np.nan, np.nan, np.nan], dtype=np.object_)
assert_series_equal(result, expected)
def test_more_replace(self):
# PR #1179
s = Series(['A', 'B', 'C', 'Aaba', 'Baca', '', NA, 'CABA',
'dog', 'cat'])
result = s.str.replace('A', 'YYY')
expected = Series(['YYY', 'B', 'C', 'YYYaba', 'Baca', '', NA,
'CYYYBYYY', 'dog', 'cat'])
assert_series_equal(result, expected)
result = s.str.replace('A', 'YYY', case=False)
expected = Series(['YYY', 'B', 'C', 'YYYYYYbYYY', 'BYYYcYYY', '', NA,
'CYYYBYYY', 'dog', 'cYYYt'])
assert_series_equal(result, expected)
result = s.str.replace('^.a|dog', 'XX-XX ', case=False)
expected = Series(['A', 'B', 'C', 'XX-XX ba', 'XX-XX ca', '', NA,
'XX-XX BA', 'XX-XX ', 'XX-XX t'])
assert_series_equal(result, expected)
def test_string_slice_get_syntax(self):
s = Series(['YYY', 'B', 'C', 'YYYYYYbYYY', 'BYYYcYYY', NA, 'CYYYBYYY',
'dog', 'cYYYt'])
result = s.str[0]
expected = s.str.get(0)
assert_series_equal(result, expected)
result = s.str[:3]
expected = s.str.slice(stop=3)
assert_series_equal(result, expected)
result = s.str[2::-1]
expected = s.str.slice(start=2, step=-1)
assert_series_equal(result, expected)
def test_string_slice_out_of_bounds(self):
s = Series([(1, 2), (1, ), (3, 4, 5)])
result = s.str[1]
expected = Series([2, np.nan, 4])
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import os
import numpy as np
import pandas as pd
from datetime import datetime
from data_management import *
from et_models import *
from gs_models import *
lai_file = '/mnt/m/Original_data/FLUXNET/lai-combined-1/lai-combined-1-MCD15A3H-006-results.csv'
sites_params = pd.read_csv( '../../DATA/EEGS/sel2_sites_params.csv')
pft_params = pd.read_csv( '../../DATA/EEGS/selected_pft_params.csv')
data_directory = '/mnt/m/Original_data/FLUXNET/FLUXNET2015_2020dl/unzipped'
def insert_LAI(df, site_0, df_p):
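    """
    Read the combined MODIS MCD15A3H LAI export and keep only the records for
    the given FLUXNET site that pass every quality-control flag. Trailing 'R'
    suffixes are stripped from the site code, and IT-Ro1 falls back to the LAI
    record of the neighbouring IT-Ro2 site.
    """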
df_lai = pd.read_csv(lai_file)
site = site_0
while site.endswith('R'):
site = site[:-1]
if site=='IT-Ro1':
df_lai = df_lai[(df_lai['siteID']=='IT-Ro2')
& (df_lai['MCD15A3H_006_FparLai_QC_MODLAND_Description']=='Good quality (main algorithm with or without saturation)')
& (df_lai['MCD15A3H_006_FparLai_QC_CloudState_Description']=='Significant clouds NOT present (clear)')
& (df_lai['MCD15A3H_006_FparExtra_QC_Aerosol_Description']=='No or low atmospheric aerosol levels detected')
& (df_lai['MCD15A3H_006_FparLai_QC_SCF_QC_Description']=='Main (RT) method used, best result possible (no saturation)')
]
else:
df_lai = df_lai[(df_lai['siteID'] == site)
& (df_lai['MCD15A3H_006_FparLai_QC_MODLAND_Description']=='Good quality (main algorithm with or without saturation)')
& (df_lai['MCD15A3H_006_FparLai_QC_CloudState_Description']=='Significant clouds NOT present (clear)')
& (df_lai['MCD15A3H_006_FparExtra_QC_Aerosol_Description']=='No or low atmospheric aerosol levels detected')
&(df_lai['MCD15A3H_006_FparLai_QC_SCF_QC_Description']=='Main (RT) method used, best result possible (no saturation)')
]
lai = df_lai['MCD15A3H_006_Lai_500m'].values
date = [datetime.strptime(dt, '%m/%d/%Y')
for dt in df_lai['Date'].values]
df_lai = | pd.DataFrame({'LAI': lai}, index=date) | pandas.DataFrame |
from sklearn.metrics import confusion_matrix, f1_score, roc_curve, auc
import numpy as np
import pandas as pd
class analysis:
"""
Functions:
_getComplexParams
_getSimpleParams
_getF1
_getROC
_loadLog
"""
def __init__(self):
pass
def __reset(self, reset):
if reset:
self._getComplexParams(abs=True)
self._getSimpleParams()
else:
try: self.dfComplex_
except: self._getComplexParams(abs=True)
try: self.dfSimple_
except: self._getSimpleParams()
def _applyCut(self, param, pMin=None, pMax=None, prob=0.5, reset=False):
"""
Function for cutting observations that don't meet the given constraints.
        Note that pMin and pMax cannot both be used in the same call.
To call:
_applyCut(param, pMin=None, pMax=None, prob=0.5, reset=False)
Parameters:
param (string) column name in data frame.
pMin minimum cutoff
pMax maximum cutoff
prob threshold probability for classifying complex
reset revert to original data frame before cutting
Postcondition:
            Observations that don't satisfy the constraints are removed
from the complex data frame. If param == 'sig', then the
cuts are also applied to the data frame for simple sources.
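
        Example (illustrative call, assuming the test data has already
        been loaded onto the instance):
            inst._applyCut('sig', pMax=0.8, prob=0.5)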
"""
# ===================================================
# Reset check and verify data frames exist.
# ===================================================
self.__reset(reset)
# ===================================================
# Remove the complex sources that don't
# satisfy the condition
# ===================================================
loc1 = self.dfComplex_[param] < pMin if pMin else self.dfComplex_[param] > pMax
self.dfComplex_.drop(self.dfComplex_.index[loc1], inplace=True)
# ===================================================
# If noise, remove the simple sources
# ===================================================
if param == 'sig':
loc2 = self.dfSimple_['sig'] < pMin if pMin else self.dfSimple_['sig'] > pMax
self.dfSimple_.drop(self.dfSimple_.index[loc2], inplace=True)
# ===================================================
# Update the parameter dataframe
# ===================================================
self._getParams(prob=prob)
def _getParams(self, prob=0.5, reset=True):
"""
Function for getting the parameters associated with plotting.
To call:
_getParams(prob=0.5)
Parameters:
            prob     threshold probability for classifying a source as complex
Postcondition:
The source label, as well as the model's predicted probability
that the source is complex and the predicted label using the
threshold probability are stored in the data frame
self.dfParams_
"""
# ===================================================
# Reset check and verify data frames exist.
# ===================================================
self.__reset(reset)
# ===================================================
# Get the prediction probabilities
# ===================================================
probComplex = self.dfComplex_['prob'].values
probSimplex = self.dfSimple_['prob'].values
# ===================================================
# Create a data frame for storing
# ===================================================
Sprob = pd.Series(np.concatenate((probComplex, probSimplex)), name='prob')
label = pd.Series(np.concatenate((len(probComplex)*[1], len(probSimplex)*[0])), name='label')
Spred = pd.Series(np.where(Sprob > prob, 1, 0), name='pred')
self.dfParams_ = pd.concat([Sprob, Spred, label], axis=1)
def _getComplexParams(self, abs=True):
"""
Function for extracting the data associated with
the second component of the complex source.
To call:
_getComplexParams(abs)
Parameters:
abs Take the absolute value of the difference
Postcondition:
The flux of the second component, the difference
in phases and depth between the two components,
and the noise value are stored in the data
frame "self.dfComplex_"
The model's predicted probability that
the source is complex is also stored.
"""
# ===================================================
# Determine which sources are complex
# ===================================================
loc = np.where(self.testLabel_ == 1)[0]
# ===================================================
# Retrieve the model's prediction that
# the complex source is complex
# ===================================================
prob = self.testProb_[loc]
# ===================================================
# Extract the flux of the second component
# ===================================================
flux = self.testFlux_[loc]
flux = np.asarray([f[1] for f in flux])
# ===================================================
# Compute the difference in phases
# ===================================================
chi = self.testChi_[loc]
chi = np.asarray([c[1] - c[0] for c in chi])
if abs: chi = np.abs(chi)
# ===================================================
# Compute the difference in Faraday depths
# ===================================================
depth = self.testDepth_[loc]
depth = np.asarray([d[1] - d[0] for d in depth])
if abs: depth = np.abs(depth)
# ===================================================
# Retrieve the noise parameter
# ===================================================
sig = self.testSig_[loc]
# ===================================================
# Convert to pandas series
# ===================================================
chi = pd.Series(chi, name='chi')
depth = pd.Series(depth, name='depth')
flux = pd.Series(flux, name='flux')
prob = pd.Series(prob, name='prob')
sig = pd.Series(sig, name="sig")
loc = pd.Series(loc, name='indx')
# ===================================================
# Store the results in a dataframe
# ===================================================
self.dfComplex_ = pd.concat([chi, depth, flux, prob, sig, loc], axis=1)
def _getSimpleParams(self):
"""
Function for extracting the data associated with
the simple sources.
To call:
_getSimpleParams()
Parameters:
None
        Postcondition:
            The flux, phase, Faraday depth, noise, and predicted probability
            of the simple sources are stored in the data frame "self.dfSimple_".
"""
# ===================================================
# Determine which sources are complex
# ===================================================
loc = np.where(self.testLabel_ == 0)[0]
# ===================================================
# Retrieve the model's prediction that
# the complex source is complex
# ===================================================
prob = self.testProb_[loc]
# ===================================================
# Extract the flux
# ===================================================
flux = self.testFlux_[loc]
# ===================================================
# Extract the phase
# ===================================================
chi = self.testChi_[loc]
# ===================================================
# Extract the Faraday depth
# ===================================================
depth = self.testDepth_[loc]
# ===================================================
# Retrieve the noise parameter
# ===================================================
sig = self.testSig_[loc]
# ===================================================
# Convert to pandas series
# ===================================================
chi = pd.Series(chi, name='chi')
depth = pd.Series(depth, name='depth')
flux = pd.Series(flux, name='flux')
prob = pd.Series(prob, name='prob')
sig = pd.Series(sig, name="sig")
loc = | pd.Series(loc, name='indx') | pandas.Series |
from pathlib import Path
import pandas as pd
from graphviz import Digraph
def plot_open_and_closed_tickets(times: pd.DataFrame) -> None:
"""
Plots the open and closed tickets per day
:param times: Dataframe with the durations for each work item
:return:
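
    Example (illustrative layout of the expected input)::

        times = pd.DataFrame({'work_item': [101, 102],
                              'start': pd.to_datetime(['2021-01-04', '2021-01-05']),
                              'end': pd.to_datetime(['2021-01-06', pd.NaT])})
        plot_open_and_closed_tickets(times)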
"""
resample_period = 'D' # We’re going to resample the dataframe per day
open_per_day = times.resample(resample_period, on='start').work_item.count().rename('open_tickets_per_day')
is_closed = times.end.notnull()
closed_per_day = times.loc[is_closed] \
.resample(resample_period, on='end') \
.work_item.count() \
.rename('closed_tickets_per_day')
tickets_df = ( | pd.concat([open_per_day, closed_per_day], axis=1) | pandas.concat |
import numpy as np
import pandas as pd
from powersimdata.design.mimic_grid import mimic_generation_capacity
from powersimdata.input.grid import Grid
from powersimdata.network.model import area_to_loadzone
from powersimdata.scenario.scenario import Scenario
def _check_solar_fraction(solar_fraction):
"""Checks that the solar_fraction is between 0 and 1, or is None.
    :param int/float solar_fraction: desired solar fraction for new capacity, or None.
:raises TypeError: if type is not int, float, or None.
:raises ValueError: if value is not between 0 and 1.
"""
if solar_fraction is None:
pass
elif isinstance(solar_fraction, (int, float)):
if not (0 <= solar_fraction <= 1):
raise ValueError("solar_fraction must be between 0 and 1")
else:
raise TypeError("solar_fraction must be int/float or None")
def _apply_zone_scale_factor_to_ct(ct, fuel, zone_id, scale_factor):
"""Applies a zone scaling factor to a change table, creating internal
change table structure as necessary. New keys are added, existing keys are
multiplied.
:param dict ct: a dictionary of scale factors, with structure matching
ct from powersimdata.input.change_table.ChangeTable.
:param str fuel: the fuel to be scaled.
:param int zone_id: the zone_id to be scaled.
:param int/float scale_factor: how much the zone should be scaled up by.
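
    Example (hypothetical values)::

        ct = {}
        _apply_zone_scale_factor_to_ct(ct, "solar", 7, 1.5)
        # ct == {"solar": {"zone_id": {7: 1.5}}}
        _apply_zone_scale_factor_to_ct(ct, "solar", 7, 2.0)
        # ct == {"solar": {"zone_id": {7: 3.0}}}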
"""
if fuel not in ct:
ct[fuel] = {}
if "zone_id" not in ct[fuel]:
ct[fuel]["zone_id"] = {}
if zone_id not in ct[fuel]["zone_id"]:
ct[fuel]["zone_id"][zone_id] = scale_factor
else:
ct[fuel]["zone_id"][zone_id] *= scale_factor
def load_targets_from_csv(filename, drop_ignored=True):
"""Interprets a CSV file as a set of targets, ensuring that required columns are present,
and filling in default values for optional columns.
:param str filename: filepath to targets csv.
:param bool drop_ignored: if True, drop all ignored columns from output.
:return: (*pandas.DataFrame*) -- DataFrame of targets from csv file
:raises TypeError: if filename is not a string
:raises ValueError: if one or more required columns is missing.
"""
# Constants
mandatory_columns = {
"region_name",
"ce_target_fraction",
}
optional_column_defaults = {
"allowed_resources": "solar, wind",
"external_ce_addl_historical_amount": 0,
"solar_percentage": np.nan,
"area_type": np.nan,
}
# Validate input
if not isinstance(filename, str):
raise TypeError("filename must be a str")
# Interpret as object so that we can fillna() with a mixed-type dict
raw_targets = pd.read_csv(filename).astype(object)
raw_columns = set(raw_targets.columns)
if not mandatory_columns <= raw_columns:
missing_columns = mandatory_columns - raw_columns
raise ValueError(f'Missing columns: {", ".join(missing_columns)}')
raw_targets.set_index("region_name", inplace=True)
# Report which columns are used vs. unused
ignored_columns = raw_columns - mandatory_columns - optional_column_defaults.keys()
print(f"ignoring: {ignored_columns}")
if drop_ignored:
raw_targets.drop(ignored_columns, axis=1, inplace=True)
for column in optional_column_defaults.keys():
# Fill optional columns that are missing entirely
if column not in raw_columns:
raw_targets[column] = np.nan
# Fill any empty cells within optional columns
raw_targets.fillna(value=optional_column_defaults, inplace=True)
return raw_targets
def _make_zonename2target(grid, targets):
"""Creates a dictionary of {zone_name: target_name} pairs.
:param powersimdata.input.grid.Grid grid: Grid instance defining the set of zones.
:param pandas.DataFrame targets: a dataframe used to look up constituent zones.
:return: (*dict*) -- a dictionary of {zone_name: target_name} pairs.
:raises ValueError: if a zone is not present in any target areas, or
if a zone is present in more than one target area.
"""
grid_model = grid.grid_model
target_zones = {
target_name: area_to_loadzone(grid_model, target_name)
if pd.isnull(targets.loc[target_name, "area_type"])
else area_to_loadzone(
grid_model, target_name, targets.loc[target_name, "area_type"]
)
for target_name in targets.index.tolist()
}
# Check for any collisions
zone_sets = target_zones.values()
if len(set.union(*zone_sets)) != sum([len(t) for t in zone_sets]):
zone_sets_list = [zone for _set in zone_sets for zone in _set]
duplicates = {zone for zone in zone_sets_list if zone_sets_list.count(zone) > 1}
error_areas = {
zone: {area for area, zone_set in target_zones.items() if zone in zone_set}
for zone in duplicates
}
error_msgs = [f"{k} within: {', '.join(v)}" for k, v in error_areas.items()]
        raise ValueError(f"Zone(s) within multiple areas! {'; '.join(error_msgs)}")
zonename2target = {}
for target_name, zone_set in target_zones.items():
# Filter out parts of states not in the interconnect(s) in this Grid
filtered_zone_set = zone_set & set(grid.zone2id.keys())
zonename2target.update({zone: target_name for zone in filtered_zone_set})
untargetted_zones = set(grid.zone2id.keys()) - set(zonename2target.keys())
if len(untargetted_zones) > 0:
err_msg = f"Targets do not cover all load zones. Missing: {untargetted_zones}"
raise ValueError(err_msg)
return zonename2target
def _get_scenario_length(scenario):
"""Get the number of hours in a scenario.
:param powersimdata.scenario.scenario.Scenario scenario: A Scenario instance.
:return: (*int*) -- the number of hours in the scenario.
"""
if not isinstance(scenario, Scenario):
        raise TypeError("scenario must be a Scenario object")
if scenario.state.name == "create":
start_ts = pd.Timestamp(scenario.state.builder.start_date)
end_ts = pd.Timestamp(scenario.state.builder.end_date)
else:
start_ts = pd.Timestamp(scenario.info["start_date"])
end_ts = pd.Timestamp(scenario.info["end_date"])
num_hours = (end_ts - start_ts) / pd.Timedelta(hours=1) + 1
return num_hours
def add_resource_data_to_targets(input_targets, scenario, calculate_curtailment=False):
"""Add resource data to targets. This data includes: previous capacity,
previous generation, previous capacity factor (with and without curtailment),
and previous curtailment.
    :param pandas.DataFrame input_targets: table including target names, used to
summarize resource data.
:param powersimdata.scenario.scenario.Scenario scenario: A Scenario instance.
:return: (*pandas.DataFrame*) -- DataFrame of targets including resource data.
"""
targets = input_targets.copy()
grid = scenario.state.get_grid()
plant = grid.plant
curtailment_types = ["hydro", "solar", "wind"]
scenario_length = _get_scenario_length(scenario)
# Map each zone in the grid to a target
zonename2target = _make_zonename2target(grid, targets)
plant["target_area"] = [zonename2target[z] for z in plant["zone_name"]]
# Summarize important values by target area & type
groupby_cols = [plant.target_area, plant.type]
# Capacity
capacity_groupby = plant.Pmax.groupby(groupby_cols)
capacity_by_target_type = capacity_groupby.sum().unstack(fill_value=0)
# Generated energy
pg_groupby = scenario.state.get_pg().sum().groupby(groupby_cols)
summed_generation = pg_groupby.sum().unstack(fill_value=0)
# Calculate capacity factors
possible_energy = scenario_length * capacity_by_target_type[curtailment_types]
capacity_factor = summed_generation[curtailment_types] / possible_energy
if calculate_curtailment:
# Calculate: curtailment, no_curtailment_cap_factor
# Hydro and solar are straightforward
hydro_plant_sum = scenario.state.get_hydro().sum()
hydro_plant_targets = plant[plant.type == "hydro"].target_area
hydro_potential_by_target = hydro_plant_sum.groupby(hydro_plant_targets).sum()
solar_plant_sum = scenario.state.get_solar().sum()
solar_plant_targets = plant[plant.type == "solar"].target_area
solar_potential_by_target = solar_plant_sum.groupby(solar_plant_targets).sum()
        # Wind is a little trickier because get_wind() returns 'wind' and 'wind_offshore'
onshore_wind_plants = plant[plant.type == "wind"].index
onshore_wind_plant_sum = scenario.state.get_wind().sum()[onshore_wind_plants]
wind_plant_targets = plant[plant.type == "wind"].target_area
wind_potential_by_target = onshore_wind_plant_sum.groupby(
wind_plant_targets
).sum()
potentials_series = [
hydro_potential_by_target,
solar_potential_by_target,
wind_potential_by_target,
]
potential = pd.concat(potentials_series, axis=1)
curtailment = (
potential - summed_generation[curtailment_types]
) / possible_energy
no_curtailment_cap_factor = potential / possible_energy
# Now add these calculations to the DataFrame
total_capacity = capacity_by_target_type.sum()
nonzero_capacity_resources = total_capacity[total_capacity > 0].index.tolist()
for r in nonzero_capacity_resources:
targets[f"{r}.prev_capacity"] = capacity_by_target_type[r]
targets[f"{r}.prev_generation"] = summed_generation[r]
if r in curtailment_types:
targets[f"{r}.prev_cap_factor"] = capacity_factor[r]
targets[f"{r}.addl_curtailment"] = 0
if calculate_curtailment:
targets[f"{r}.no_curtailment_cap_factor"] = no_curtailment_cap_factor[r]
targets[f"{r}.curtailment"] = curtailment[r]
return targets
def add_demand_to_targets(input_targets, scenario):
"""Add demand data to targets.
:param pandas.DataFrame input_targets: table including target names, used to
summarize demand.
:param powersimdata.scenario.scenario.Scenario scenario: A Scenario instance.
:return: (*pandas.DataFrame*) -- DataFrame of targets including demand data.
"""
grid = scenario.state.get_grid()
targets = input_targets.copy()
zonename2target = _make_zonename2target(grid, targets)
zoneid2target = {grid.zone2id[z]: target for z, target in zonename2target.items()}
summed_demand = scenario.state.get_demand().sum().to_frame()
summed_demand["target"] = [zoneid2target[id] for id in summed_demand.index]
targets["demand"] = summed_demand.groupby("target").sum()
return targets
def add_shortfall_to_targets(input_targets):
"""Add shortfall data to targets.
:param pandas.DataFrame input_targets: table with demand, prev_generation,
and ce_target_fraction.
:return: (*pandas.DataFrame*) -- DataFrame of targets including shortfall data.
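    Example (hypothetical numbers): with demand = 100 MWh,
    ce_target_fraction = 0.3, prev_ce_generation = 20 MWh and
    external_ce_addl_historical_amount = 5 MWh, the target is 30 MWh and
    ce_shortfall = max(0, 30 - (20 + 5)) = 5 MWh, with ce_overgeneration = 0.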
"""
targets = input_targets.copy()
allowed_resources_dict = targets.allowed_resources.to_dict()
allowed_sets = {
target: {resource.strip() for resource in allowed.split(",")}
for target, allowed in allowed_resources_dict.items()
}
# Detect if there are allowed resources that aren't in the grid, and add them
all_allowed = set().union(*allowed_sets.values())
for resource in all_allowed:
if f"{resource}.prev_generation" not in targets.columns:
targets[f"{resource}.prev_generation"] = 0
targets["prev_ce_generation"] = targets.apply(
lambda x: sum([x[f"{r}.prev_generation"] for r in allowed_sets[x.name]]), axis=1
)
targets["ce_target"] = targets.demand * targets.ce_target_fraction
total_ce_generation = (
targets.prev_ce_generation + targets.external_ce_addl_historical_amount
)
raw_shortfall = targets.ce_target - total_ce_generation
targets["ce_shortfall"] = raw_shortfall.clip(lower=0)
targets["ce_overgeneration"] = (-1 * raw_shortfall).clip(lower=0)
return targets
def calculate_overall_shortfall(targets, method, normalized=False):
"""Calculates overall shortfall.
:param pandas.DataFrame targets: table of targets.
:param str method: shortfall calculation method ("independent" or "collaborative").
:param bool normalized: whether to normalize by total demand.
:return: (*float*) -- overall shortfall, either in MWh or normalized by
total demand.
"""
if not isinstance(targets, pd.DataFrame):
raise TypeError("targets must be a pandas DataFrame")
if "ce_shortfall" not in targets.columns:
raise ValueError("targets missing shortfall, see add_shortfall_to_targets()")
if not isinstance(normalized, bool):
raise TypeError("normalized must be bool")
allowed_methods = {"independent", "collaborative"}
if method == "collaborative":
participating_targets = targets[targets.ce_target > 0]
summed_shortfall = participating_targets.ce_shortfall.sum()
summed_overgeneration = participating_targets.ce_overgeneration.sum()
overall_shortfall = summed_shortfall - summed_overgeneration
elif method == "independent":
overall_shortfall = targets.ce_shortfall.sum()
else:
raise ValueError(f"method must be one of: {allowed_methods}")
if normalized:
return overall_shortfall / targets.demand.sum()
else:
return overall_shortfall
def add_new_capacities_independent(
input_targets, scenario_length, addl_curtailment=None
):
"""Calculates new capacities based on an Independent strategy.
:param pandas.DataFrame input_targets: table of targets.
:param int scenario_length: number of hours in new scenario.
:param pandas.DataFrame/None addl_curtailment: additional expected curtailment
by target/resource. If None, assumed zero for all targets/resources.
:return: (*pandas.DataFrame*) -- targets dataframe with next capacities added.
"""
def calculate_added_capacity(target):
if | pd.isnull(target["solar_percentage"]) | pandas.isnull |
from datetime import datetime, timedelta
from math import isnan
import logging
import os
import pandas as pd
data = [
{'Stock Symbol': 'TEA', 'Type': 'Common', 'Last Dividend': 0, 'Fixed Dividend': '', 'Par Value': 100},
{'Stock Symbol': 'POP', 'Type': 'Common', 'Last Dividend': 8, 'Fixed Dividend': '', 'Par Value': 100},
{'Stock Symbol': 'ALE', 'Type': 'Common', 'Last Dividend': 23, 'Fixed Dividend': '', 'Par Value': 60},
{'Stock Symbol': 'GIN', 'Type': 'Preferred', 'Last Dividend': 8, 'Fixed Dividend': 0.02, 'Par Value': 100},
{'Stock Symbol': 'JOE', 'Type': 'Common', 'Last Dividend': 13, 'Fixed Dividend': '', 'Par Value': 250}]
trade_book = []
logging.basicConfig(filename=os.path.join(os.path.split(os.getcwd())[0], "stock.log"), format='%(asctime)s | %(message)s', filemode='w')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
class Stock_Exchange(object):
"""
Sample stock exchange class
"""
def __init__(self):
self.df = pd.DataFrame(data)
def calculate_dividend(self, price, stock_symbol):
"""
Calculates Dividend Yield for stock symbol using its given price.
:param price: Price of the stock
:type price: Integer
:param stock_symbol: Stock Symbol
:type stock_symbol: String
:returns: Dividend yield
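        Example (illustrative, based on the hard-coded table above): the
        preferred stock 'GIN' at price 50 yields 0.02 * 100 / 50 = 0.04,
        while the common stock 'POP' at price 100 yields 8 / 100 = 0.08.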
"""
logger.debug(' ************** Calculate Dividend ************** ')
div_yield = 0.0
calc_type = self.df[(self.df['Stock Symbol'] == stock_symbol)]['Type']
last_dividend = self.df[(self.df['Stock Symbol'] == stock_symbol)]['Last Dividend']
fixed_dividend = self.df[(self.df['Stock Symbol'] == stock_symbol)]['Fixed Dividend']
par_val = self.df[(self.df['Stock Symbol'] == stock_symbol)]['Par Value']
if price > 0:
if str(calc_type.iloc[0]) == 'Preferred':
if not isnan(fixed_dividend):
                    div_yield = (float(fixed_dividend) * float(par_val)) / price
else:
div_yield = float(last_dividend)/price
logger.info("For price %s and stock symbol %s dividend yield is %s" %(price, stock_symbol, div_yield))
return div_yield
def calculate_pe(self, price, stock_symbol):
"""
Calculates P/E ratio
:param price: Price of the stock
:type price: Integer
:param stock_symbol: Stock Symbol
:type stock_symbol: String
:returns: P/E Ratio
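        Example (illustrative): 'POP' has a last dividend of 8, so at a
        price of 100 the P/E ratio is 100 / 8 = 12.5.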
"""
logger.debug(' ************** Calculate P/E Ratio ************** ')
pe_ratio = 0
last = self.df[(self.df['Stock Symbol'] == stock_symbol)]['Last Dividend']
if float(last) > 0:
pe_ratio = price/float(last)
logger.info("For price %s and stock symbol %s P/E Ratio is %s" %(price, stock_symbol, pe_ratio))
return pe_ratio
def register_trade(self, price, stock_symbol, quantity, trade_type):
"""
Registers new trade information
:param price: Price of the stock
:type price: Integer
:param stock_symbol: Stock Symbol
:type stock_symbol: String
:param quantity: Quantity
:type quantity: Integer
:param trade_type: Trade type-Buy or Sell
:type trade_type: String
"""
logger.debug(' ************** Register Trade ************** ')
trade_book.append([stock_symbol, price, quantity, trade_type, datetime.now()])
logger.info("New trade registered for stock symbol %s with trade type %s is %s" %(stock_symbol, trade_type, trade_book))
def volume_weight_stock_price(self, stock_symbol):
"""
Calculates volume weighted stock price
:param stock_symbol: Stock Symbol
:type stock_symbol: String
:returns: Volume Weighted Stock Price
"""
logger.debug(' ************** Calculate Volume weight stock price ************** ')
vol_weighted_stock_price = 0
self.df = | pd.DataFrame(trade_book) | pandas.DataFrame |
import argparse
import os
from collections import defaultdict
import re
import itertools
import pandas
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
import numpy as np
import matplotlib.pyplot as plt
def results_to_csv(results_dir, out):
results = defaultdict(lambda: defaultdict(lambda: 0))
for filename in os.listdir(results_dir):
if filename.endswith(".asan.log"):
key = "ASan"
elif filename.endswith(".binary-asan.log"):
key = "BASan"
else:
key = "Valgrind Memcheck"
fullpath = os.path.join(results_dir, filename)
with open(fullpath, encoding="ISO-8859-1") as fd:
data = fd.read().split("\n")[-9:]
keys2 = ["Total", "True Positive", "False Negative",
"Total", "True Negative", "False Positive",
"Timeout Vuln", "Timeout Safe"]
for idx, line in enumerate(data):
if not line:
continue
value = int(line.split(": ")[1].strip())
results[key][keys2[idx]] += value
df = pandas.DataFrame.from_dict(results)
df = df.reset_index()
df['index'] = pandas.Categorical(
df['index'], ["Total", "True Positive", "True Negative",
"False Positive", "False Negative",
"Timeout Vuln", "Timeout Safe"])
df = df.sort_values('index').set_index('index').rename_axis(None)
csvf = out + ".csv"
with open(csvf, 'w') as fd:
fd.write(df.to_csv())
def deep_analyze(results_dir, out):
#plt.rcParams['font.size'] = 1
results = defaultdict(set)
counts = defaultdict(lambda: defaultdict(lambda: 0))
all_cwes = set()
for filename in os.listdir(results_dir):
if filename.endswith(".asan.log"):
key = "ASan"
elif filename.endswith(".binary-asan.log"):
key = "BASan"
else:
key = "Valgrind Memcheck"
fullpath = os.path.join(results_dir, filename)
with open(fullpath, encoding="ISO-8859-1") as fd:
data = fd.read().split("\n")
data = set(data)
for line in data:
if not line.startswith("CWE"):
continue
if "failed" in line and "bad" in line:
cwes = tuple(re.findall(r"(CWE[0-9]+)", line))
results[key].add(cwes)
counts[key][cwes] += 1
all_cwes.update(cwes)
print(all_cwes)
xlabels = ["CWE121", "CWE122", "CWE124", "CWE126", "CWE127"]
ylabels = list(sorted(all_cwes.difference(xlabels)))
all_cwes = list(sorted(all_cwes))
points = defaultdict(lambda: [[], []])
annotations = defaultdict(list)
keyys = {'ASan': +0.1, 'BASan': -0.1, 'Valgrind Memcheck': 0}
keyxs = {'ASan': -0.1, 'BASan': -0.1, 'Valgrind Memcheck': 0.1}
for key, failed in results.items():
for tags in failed:
if not tags:
continue
tag0 = xlabels.index(tags[0])
if len(tags) > 1:
tag1 = ylabels.index(tags[1]) + 1
else:
tag1 = 0
x = tag0 + keyxs[key]
y = tag1 + keyys[key]
#x = 0.20 * np.random.random() + (tag0)
#y = 0.20 * np.random.random() + (tag1)
points[key][0].append(x)
points[key][1].append(y)
annotations[key].append(counts[key][tags])
colors = {
'ASan': '#1b9e77',
'BASan': '#d95f02',
'Valgrind Memcheck': '#7570b3'}
fig = plt.figure()
ax = fig.add_subplot(111)
print(annotations)
plt.scatter(
points["ASan"][1],
points["ASan"][0],
c=colors["ASan"],
alpha=1.0,
marker="+")
plt.scatter(
points["BASan"][1],
points["BASan"][0],
c=colors["BASan"],
alpha=1.0,
marker="x")
plt.scatter(
points["Valgrind Memcheck"][1],
points["Valgrind Memcheck"][0],
c=colors["Valgrind Memcheck"],
alpha=1.0,
marker="^")
plt.plot([], c=colors["ASan"], marker="+", label="ASan")
plt.plot([], c=colors["BASan"], marker="x", label="BASan")
plt.plot([], c=colors["Valgrind Memcheck"], marker="^", label="Valgrind Memcheck")
plt.legend()
for key, values in points.items():
for idx in range(len(values[0])):
tx = values[1][idx] + keyys[key]
ty = values[0][idx] + keyxs[key]
if keyys[key] < 0:
tx -= 0.1
else:
tx -= 0.04
if keyxs[key] < 0:
ty -= 0.05
plt.annotate(
annotations[key][idx], (values[1][idx], values[0][idx]),
xytext=(tx, ty),
fontsize=6,
color=colors[key])
xlabels = [x[3:] for x in xlabels]
ylabels = [y[3:] for y in ylabels]
plt.yticks(range(0, len(xlabels)), xlabels, rotation=0)
plt.xticks(range(0, len(ylabels) + 1), ['N/A'] + ylabels)
plt.ylim(-0.5, len(xlabels))
plt.xlim(-1, len(ylabels) + 1.5)
ax.set_ylabel("Primary CWE-ID (What/Where)")
ax.set_xlabel("Secondary CWE-ID (How)")
plt.tight_layout()
plt.savefig(out + "-scatter.pdf")
#for k1, k2 in itertools.combinations(results.keys(), 2):
#print(
#"{} & {}".format(k1, k2),
#results[k1].intersection(results[k2])
#)
def results_to_latex(out):
csvf = out + ".csv"
df = | pandas.read_csv(csvf) | pandas.read_csv |
import time
import numpy as np
import pandas as pd
from collections import defaultdict
from joblib import effective_n_jobs
from typing import List, Dict
from anndata import AnnData
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans
# from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from pegasus.io import read_input
import logging
from .. import decorators as pg_deco
logger = logging.getLogger("pegasus")
@pg_deco.TimeLogger()
def find_markers(
data: AnnData,
label_attr: str,
de_key: str = "de_res",
n_jobs: int = -1,
min_gain: float = 1.0,
random_state: int = 0,
remove_ribo: bool = False,
) -> Dict[str, Dict[str, List[str]]]:
"""Find markers using gradient boosting method.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
label_attr: ``str``
Cluster labels used for finding markers. Must exist in ``data.obs``.
de_key: ``str``, optional, default: ``"de_res"``
Keyword of DE analysis result stored in ``data.varm``.
n_jobs: ``int``, optional, default: ``-1``
Number of threads to used. If ``-1``, use all available threads.
min_gain: ``float``, optional, default: ``1.0``
Only report genes with a feature importance score (in gain) of at least ``min_gain``.
random_state: ``int``, optional, default: ``0``
Random seed set for reproducing results.
remove_ribo: ``bool``, optional, default: ``False``
If ``True``, remove ribosomal genes with either RPL or RPS as prefixes.
Returns
-------
markers: ``Dict[str, Dict[str, List[str]]]``
        A Python dictionary of marker information with structure ``dict[cluster_id]['strong'/'weak'/'down']`` mapping to gene lists (with matching ``*_gain`` lists of importance scores).
Examples
--------
>>> marker_dict = pg.find_markers(adata, label_attr = 'leiden_labels')
"""
n_jobs = effective_n_jobs(n_jobs)
if remove_ribo:
data = data[
:,
np.vectorize(lambda x: not x.startswith("RPL") and not x.startswith("RPS"))(
data.var_names
),
]
X_train, X_test, y_train, y_test = train_test_split(
data.X,
data.obs[label_attr],
test_size=0.1,
random_state=random_state,
stratify=data.obs[label_attr],
)
# start = time.time()
# xgb = XGBClassifier(n_jobs = n_jobs, n_gpus = 0)
# xgb.fit(X_train, y_train, eval_set = [(X_train, y_train), (X_test, y_test)], eval_metric = 'merror')
# # print(xgb.evals_result())
# end = time.time()
# print("XGBoost used {:.2f}s to train.".format(end - start))
start_lgb = time.time()
lgb = LGBMClassifier(n_jobs=n_jobs, metric="multi_error", importance_type="gain")
lgb.fit(
X_train,
y_train,
eval_set=[(X_train, y_train), (X_test, y_test)],
early_stopping_rounds=1,
)
end_lgb = time.time()
logger.info("LightGBM used {:.2f}s to train.".format(end_lgb - start_lgb))
ntot = (lgb.feature_importances_ >= min_gain).sum()
ords = np.argsort(lgb.feature_importances_)[::-1][:ntot]
log_exprs = [
x for x in data.varm[de_key].dtype.names if x.startswith("mean_logExpr:")
]
labels = [x.rpartition(":")[2] for x in log_exprs]
titles = [("down", "down_gain"), ("weak", "weak_gain"), ("strong", "strong_gain")]
markers = defaultdict(lambda: defaultdict(list))
kmeans = KMeans(n_clusters=3, random_state=random_state)
for gene_id in ords:
gene_symbol = data.var_names[gene_id]
mydat = [[x] for x in data.varm[de_key][log_exprs][gene_id]]
kmeans.fit(mydat)
kmeans_label_mode = pd.Series(kmeans.labels_).mode()[0]
for i, kmeans_label in enumerate(np.argsort(kmeans.cluster_centers_[:, 0])):
if kmeans_label != kmeans_label_mode:
for pos in (kmeans.labels_ == kmeans_label).nonzero()[0]:
clust_label = labels[pos]
markers[clust_label][titles[i][0]].append(gene_symbol)
markers[clust_label][titles[i][1]].append(
"{:.2f}".format(lgb.feature_importances_[gene_id])
)
return markers
def run_find_markers(
input_h5ad_file: str,
output_file: str,
label_attr: str,
de_key: str = "de_res",
n_jobs: int = -1,
min_gain: float = 1.0,
random_state: int = 0,
remove_ribo: bool = False,
) -> None:
"""
For command line use.
"""
import xlsxwriter
from natsort import natsorted
data = read_input(input_h5ad_file)
markers = find_markers(
data,
label_attr,
de_key=de_key,
n_jobs=n_jobs,
min_gain=min_gain,
random_state=random_state,
remove_ribo=remove_ribo,
)
keywords = [("strong", "strong_gain"), ("weak", "weak_gain"), ("down", "down_gain")]
writer = | pd.ExcelWriter(output_file, engine="xlsxwriter") | pandas.ExcelWriter |
# -*- coding: utf-8 -*-
import pandas as pd
import re
import unicodedata
import urllib
import os.path
import glob
import yaml
"""
This script imports the updated 2020 titles for goals, targets, and indicators.
In keeping with past imports, the id numbers (eg, 1.1.1, 1.1, etc) are stripped
from the beginnings of the titles.
Titles: Currently the titles are translated (at the UN level) into Arabic,
Chinese, Spanish, French, English, and Russian; however the Arabic
translations are only available as a PDF, which is more difficult to parse with
a script (and so are being skipped).
"""
def sdg_number_from_text(text):
"""
This parses a string of text and pulls out the SDG number. Possible formats of
return value are: '1', '1.1', '1.1.1'
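
    Example (illustrative)::

        >>> sdg_number_from_text('1.2.1 Proportion of population covered')
        '1.2.1'
        >>> sdg_number_from_text('Goal 17')
        '17'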
"""
if pd.isnull(text):
return None
matches = re.findall(r'(\d+)(\.\w+)?(\.\w+)?', text)
if len(matches) > 0:
match = ''.join(matches[0])
# Sanity checks.
match_parts = match.split('.')
# In these cases, a missing space causes the first word
# of the indicator title to appear as an extension of the
# third id part.
if len(match_parts) == 3 and len(match_parts[2]) > 2:
match_2_replacement = ''
for character in match_parts[2]:
if character.isnumeric() or character.islower():
match_2_replacement += character
else:
break
if match_2_replacement != '' and match_2_replacement != match_parts[2]:
match = match_parts[0] + '.' + match_parts[1] + '.' + match_2_replacement
return match
else:
return None
def sdg_goal_is_valid(text):
if text is None:
return False
parts = text.split('.')
if len(parts) > 1:
return False
if not text.isnumeric():
return False
if int(text) > 17:
return False
return True
def sdg_indicator_is_valid(text):
if text is None:
return False
parts = text.split('.')
if len(parts) != 3:
return False
return True
def sdg_target_is_valid(text):
if text is None:
return False
parts = text.split('.')
if len(parts) != 2:
return False
return True
def sdg_text_without_number(text, number):
"""
This simply removes a number from some text.
"""
normalized = unicodedata.normalize("NFKD", str(text))
# Remove the number and everything before it.
parts = normalized.split(number)
if len(parts) == 2:
return parts[1].lstrip('.').strip()
else:
return normalized
def clean_indicator_title(title):
last = title[-1]
if last == 'i':
return title[:-1]
if last.isnumeric():
last_word = title.split(' ')[-1]
last_word = last_word.split('-')[-1]
last_word = last_word.split('–')[-1]
last_word = last_word.split('‐')[-1]
last_word = last_word.split('+B')[-1]
if not last_word.isnumeric():
print('Found a footnote: ' + title)
return title[:-1]
return title
def clean_target_title(title):
last = title[-1]
if last.isnumeric() and last != '0':
return title[:-1]
return title
def clean_goal_title(title):
last = title[-1]
if last.isnumeric():
return title[:-1]
return title
def main():
global_goals = {}
global_targets = {}
global_indicators = {}
# First, the titles.
title_spreadsheets = {
'en': 'https://unstats.un.org/sdgs/indicators/Global%20Indicator%20Framework%20after%202020%20review_English.xlsx',
'zh-Hans': 'https://unstats.un.org/sdgs/indicators/Global%20Indicator%20Framework%20after%202020%20review_Chinese.xlsx',
'es': 'https://unstats.un.org/sdgs/indicators/Global%20Indicator%20Framework%20after%202020%20review_Spanish.xlsx',
'fr': 'https://unstats.un.org/sdgs/indicators/Global%20Indicator%20Framework%20after%202020%20review_French.xlsx',
'ru': 'https://unstats.un.org/sdgs/indicators/Global%20Indicator%20Framework%20after%202020%20review_Russian.xlsx'
}
for language in title_spreadsheets:
global_goals[language] = {}
global_targets[language] = {}
global_indicators[language] = {}
spreadsheet_url = title_spreadsheets[language]
import_options = {
'header': None,
'names': ['target', 'indicator'],
'usecols': [1, 2],
'skiprows': [0, 1, 2],
'skipfooter': 6,
#'encoding': 'utf-8',
}
df = | pd.read_excel(spreadsheet_url, **import_options) | pandas.read_excel |
from itertools import chain
import operator
import numpy as np
import pytest
from pandas.core.dtypes.common import is_number
from pandas import (
DataFrame,
Index,
Series,
)
import pandas._testing as tm
from pandas.core.groupby.base import maybe_normalize_deprecated_kernels
from pandas.tests.apply.common import (
frame_transform_kernels,
series_transform_kernels,
)
@pytest.mark.parametrize("func", ["sum", "mean", "min", "max", "std"])
@pytest.mark.parametrize(
"args,kwds",
[
pytest.param([], {}, id="no_args_or_kwds"),
pytest.param([1], {}, id="axis_from_args"),
pytest.param([], {"axis": 1}, id="axis_from_kwds"),
pytest.param([], {"numeric_only": True}, id="optional_kwds"),
pytest.param([1, True], {"numeric_only": True}, id="args_and_kwds"),
],
)
@pytest.mark.parametrize("how", ["agg", "apply"])
def test_apply_with_string_funcs(request, float_frame, func, args, kwds, how):
if len(args) > 1 and how == "agg":
request.node.add_marker(
pytest.mark.xfail(
raises=TypeError,
reason="agg/apply signature mismatch - agg passes 2nd "
"argument to func",
)
)
result = getattr(float_frame, how)(func, *args, **kwds)
expected = getattr(float_frame, func)(*args, **kwds)
tm.assert_series_equal(result, expected)
def test_with_string_args(datetime_series):
for arg in ["sum", "mean", "min", "max", "std"]:
result = datetime_series.apply(arg)
expected = getattr(datetime_series, arg)()
assert result == expected
@pytest.mark.parametrize("op", ["mean", "median", "std", "var"])
@pytest.mark.parametrize("how", ["agg", "apply"])
def test_apply_np_reducer(float_frame, op, how):
# GH 39116
float_frame = DataFrame({"a": [1, 2], "b": [3, 4]})
result = getattr(float_frame, how)(op)
# pandas ddof defaults to 1, numpy to 0
kwargs = {"ddof": 1} if op in ("std", "var") else {}
expected = Series(
getattr(np, op)(float_frame, axis=0, **kwargs), index=float_frame.columns
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"op", ["abs", "ceil", "cos", "cumsum", "exp", "log", "sqrt", "square"]
)
@pytest.mark.parametrize("how", ["transform", "apply"])
def test_apply_np_transformer(float_frame, op, how):
# GH 39116
# float_frame will _usually_ have negative values, which will
# trigger the warning here, but let's put one in just to be sure
float_frame.iloc[0, 0] = -1.0
warn = None
if op in ["log", "sqrt"]:
warn = RuntimeWarning
with tm.assert_produces_warning(warn):
result = getattr(float_frame, how)(op)
expected = getattr(np, op)(float_frame)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series(dtype=np.float64),
[
("sum", 0),
("max", np.nan),
("min", np.nan),
("all", True),
("any", False),
("mean", np.nan),
("prod", 1),
("std", np.nan),
("var", np.nan),
("median", np.nan),
],
),
tm.get_cython_table_params(
Series([np.nan, 1, 2, 3]),
[
("sum", 6),
("max", 3),
("min", 1),
("all", True),
("any", True),
("mean", 2),
("prod", 6),
("std", 1),
("var", 1),
("median", 2),
],
),
tm.get_cython_table_params(
Series("a b c".split()),
[
("sum", "abc"),
("max", "c"),
("min", "a"),
("all", True),
("any", True),
],
),
),
)
def test_agg_cython_table_series(series, func, expected):
# GH21224
# test reducing functions in
# pandas.core.base.SelectionMixin._cython_table
result = series.agg(func)
if is_number(expected):
assert np.isclose(result, expected, equal_nan=True)
else:
assert result == expected
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series(dtype=np.float64),
[
("cumprod", Series([], Index([]), dtype=np.float64)),
("cumsum", Series([], Index([]), dtype=np.float64)),
],
),
tm.get_cython_table_params(
Series([np.nan, 1, 2, 3]),
[
("cumprod", | Series([np.nan, 1, 2, 6]) | pandas.Series |
import pandas as pd
import numpy as np
import math
from statistics import mean
from sklearn.decomposition import PCA
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, mean_squared_log_error
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
# root_path = os.path.abspath(os.path.join(root_path,os.path.pardir))
from metrics_ import PPTS,mean_absolute_percentage_error
def read_two_stage(station,decomposer,predict_pattern,wavelet_level="db10-2"):
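    """
    Load the two-stage (decomposition + ESVR) forecasts of one prediction
    pattern, average the test-set predictions over the 10 random seeds, and
    return the observed records, the averaged predictions, the evaluation
    metrics (R2/NSE, NRMSE, MAE, MAPE, PPTS) and the mean training time cost.
    """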
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+wavelet_level+"\\"+predict_pattern+"\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+predict_pattern+"\\"
predictions = pd.DataFrame()
time_cost=[]
for j in range(1,11):
model_name = station+"_"+decomposer+"_esvr_"+predict_pattern+"_seed"+str(j)+".csv"
data = pd.read_csv(model_path+model_name)
if j==1:
records = data['test_y'][0:120]
test_pred=data['test_pred'][0:120]
time_cost.append(data['time_cost'][0])
test_pred=test_pred.reset_index(drop=True)
predictions = pd.concat([predictions,test_pred],axis=1)
predictions = predictions.mean(axis=1)
records = records.values.flatten()
predictions = predictions.values.flatten()
r2=r2_score(y_true=records,y_pred=predictions)
nrmse=math.sqrt(mean_squared_error(y_true=records,y_pred=predictions))/(sum(records)/len(predictions))
mae=mean_absolute_error(y_true=records,y_pred=predictions)
mape=mean_absolute_percentage_error(y_true=records,y_pred=predictions)
ppts=PPTS(y_true=records,y_pred=predictions,gamma=5)
time_cost=mean(time_cost)
return records,predictions,r2,nrmse,mae,mape,ppts,time_cost
def read_two_stage_traindev_test(station,decomposer,predict_pattern,wavelet_level="db10-2"):
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+wavelet_level+"\\"+predict_pattern+"\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+predict_pattern+"\\"
test_predss = pd.DataFrame()
dev_predss = pd.DataFrame()
time_cost=[]
for j in range(1,11):
model_name = station+"_"+decomposer+"_esvr_"+predict_pattern+"_seed"+str(j)+".csv"
data = pd.read_csv(model_path+model_name)
if j==1:
test_y = data['test_y'][0:120]
dev_y = data['dev_y'][0:120]
dev_pred=data['dev_pred'][0:120]
test_pred=data['test_pred'][0:120]
time_cost.append(data['time_cost'][0])
dev_pred=dev_pred.reset_index(drop=True)
test_pred=test_pred.reset_index(drop=True)
test_predss = pd.concat([test_predss,test_pred],axis=1)
dev_predss = pd.concat([dev_predss,dev_pred],axis=1)
test_predss = test_predss.mean(axis=1)
dev_predss = dev_predss.mean(axis=1)
test_y = test_y.values.flatten()
dev_y = dev_y.values.flatten()
test_predss = test_predss.values.flatten()
dev_predss = dev_predss.values.flatten()
test_nse=r2_score(y_true=test_y,y_pred=test_predss)
test_nrmse=math.sqrt(mean_squared_error(y_true=test_y,y_pred=test_predss))/(sum(test_y)/len(test_predss))
test_mae=mean_absolute_error(y_true=test_y,y_pred=test_predss)
test_mape=mean_absolute_percentage_error(y_true=test_y,y_pred=test_predss)
test_ppts=PPTS(y_true=test_y,y_pred=test_predss,gamma=5)
dev_nse=r2_score(y_true=dev_y,y_pred=dev_predss)
dev_nrmse=math.sqrt(mean_squared_error(y_true=dev_y,y_pred=dev_predss))/(sum(dev_y)/len(dev_predss))
dev_mae=mean_absolute_error(y_true=dev_y,y_pred=dev_predss)
dev_mape=mean_absolute_percentage_error(y_true=dev_y,y_pred=dev_predss)
dev_ppts=PPTS(y_true=dev_y,y_pred=dev_predss,gamma=5)
metrics_dict={
"dev_nse":dev_nse,
"dev_nrmse":dev_nrmse,
"dev_mae":dev_mae,
"dev_mape":dev_mape,
"dev_ppts":dev_ppts,
"test_nse":test_nse,
"test_nrmse":test_nrmse,
"test_mae":test_mae,
"test_mape":test_mape,
"test_ppts":test_ppts,
"time_cost":time_cost,
}
time_cost=mean(time_cost)
return dev_y,dev_predss,test_y,test_predss,metrics_dict
def read_two_stage_max(station,decomposer,predict_pattern,wavelet_level="db10-2"):
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+wavelet_level+"\\"+predict_pattern+"\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+predict_pattern+"\\"
predictions = pd.DataFrame()
time_cost=[]
r2list=[]
for j in range(1,11):
model_name = station+"_"+decomposer+"_esvr_"+predict_pattern+"_seed"+str(j)+".csv"
data = pd.read_csv(model_path+model_name)
r2list.append(data['test_r2'][0])
print("one-month NSE LIST:{}".format(r2list))
max_id = r2list.index(max(r2list))
print("one-month max id:{}".format(max_id))
model_name = station+"_"+decomposer+"_esvr_"+predict_pattern+"_seed"+str(max_id+1)+".csv"
data = pd.read_csv(model_path+model_name)
records = data['test_y'][0:120]
test_pred=data['test_pred'][0:120]
records = records.values.flatten()
predictions = test_pred.values.flatten()
r2=data['test_r2'][0]
nrmse=data['test_nrmse'][0]
mae=data['test_mae'][0]
mape=data['test_mape'][0]
ppts=data['test_ppts'][0]
time_cost=data['time_cost'][0]
return records,predictions,r2,nrmse,mae,mape,ppts,time_cost
def read_pure_esvr(station):
model_path = root_path+"\\"+station+"\\projects\\esvr\\"
predictions = pd.DataFrame()
time_cost=[]
for j in range(1,11):
model_name = station+"_esvr_seed"+str(j)+".csv"
data = pd.read_csv(model_path+model_name)
if j==1:
records = data['test_y'][0:120]
test_pred=data['test_pred'][0:120]
time_cost.append(data['time_cost'][0])
test_pred=test_pred.reset_index(drop=True)
predictions = pd.concat([predictions,test_pred],axis=1)
predictions = predictions.mean(axis=1)
records = records.values.flatten()
predictions = predictions.values.flatten()
r2=r2_score(y_true=records,y_pred=predictions)
nrmse=math.sqrt(mean_squared_error(y_true=records,y_pred=predictions))/(sum(records)/len(records))
mae=mean_absolute_error(y_true=records,y_pred=predictions)
mape=mean_absolute_percentage_error(y_true=records,y_pred=predictions)
ppts=PPTS(y_true=records,y_pred=predictions,gamma=5)
time_cost=mean(time_cost)
return records,predictions,r2,nrmse,mae,mape,ppts,time_cost
def read_pca_metrics(station,decomposer,start_component,stop_component,wavelet_level="db10-2"):
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\data\\"+wavelet_level+"\\one_step_1_month_forecast\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\data\\one_step_1_month_forecast\\"
train = pd.read_csv(model_path+"minmax_unsample_train.csv")
dev = pd.read_csv(model_path+"minmax_unsample_dev.csv")
test = pd.read_csv(model_path+"minmax_unsample_test.csv")
norm_id=pd.read_csv(model_path+"norm_unsample_id.csv")
sMax = (norm_id['series_max']).values
sMin = (norm_id['series_min']).values
# Conncat the training, development and testing samples
samples = pd.concat([train,dev,test],axis=0)
samples = samples.reset_index(drop=True)
    # Renormalize the combined samples
samples = np.multiply(samples + 1,sMax - sMin) / 2 + sMin
y = samples['Y']
X = samples.drop('Y',axis=1)
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("n_components_pca_mle:{}".format(n_components_pca_mle))
mle = X.shape[1]-n_components_pca_mle
nrmse=[]
r2=[]
mae=[]
mape=[]
ppts=[]
for i in range(start_component,stop_component+1):
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+wavelet_level+"\\one_step_1_month_forecast_with_pca_"+str(i)+"\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\one_step_1_month_forecast_with_pca_"+str(i)+"\\"
        # average the SVRs trained with different seeds
test_pred_df = | pd.DataFrame() | pandas.DataFrame |
import os
import gc
import time
import pandas as pd
from tqdm import tqdm
from copy import deepcopy
import matplotlib.pyplot as plt
import torch
from torch.utils.data import Dataset
from joblib import Parallel, delayed
from sklearn.model_selection import train_test_split
from src.utils.transform import *
import warnings
warnings.filterwarnings("ignore")
class SynthesizedDatabaseCreator(object):
"""
"""
def __init__(self, example_number, synthesized_path_name, image_dims):
self.IMAGE_DIMS = image_dims
self.number_points = example_number
self.cpu_count = 4
self.synthesized_path_name = synthesized_path_name
def add_pattern(cur_x, add_x):
if cur_x == 1 and add_x == 2:
return add_x
else:
return cur_x
self.add_pattern = np.vectorize(add_pattern)
def load_template_map(image_dim):
template_path = 'input/template_wafer_map.pkl'
template = pd.read_pickle(template_path)
template = cv2.resize(template.waferMap.copy(), dsize=(image_dim[0], image_dim[1]),
interpolation=cv2.INTER_NEAREST)
            # 2 - defect pattern
            # 1 - background
            # 0 - area where there is nothing (outside the wafer)
template[template == 2] = 1
return template
self.template_map = load_template_map(self.IMAGE_DIMS)
def sawtooth_line(self, XC_, YC_, L0_, angle_, pattern_type, line_count=1, lam_poisson=0.2, save=False,
add_patterns=[None]):
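        """
        Draw a piecewise (sawtooth) scratch onto a copy of the wafer template
        for every parameter set and return a DataFrame of
        (waferMap, failureType) rows. L0_ holds the total line lengths,
        XC_/YC_ the per-segment start offsets and angle_ the per-segment
        slopes; line_count scratches are drawn per map.
        """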
size = XC_.shape[0]
synthesized_base = [None] * size
for n in tqdm(range(size)):
step = n
template = deepcopy(self.template_map)
if add_patterns[0]:
for pattern in add_patterns:
for img_pattern in pattern:
template = self.add_pattern(template, img_pattern)
COLOR_SCALE = 2
for repeate in range(line_count):
if repeate:
step = random.randint(0, size - 1)
                # initialize the line parameters
L0 = L0_[step]
XC = XC_[step]
YC = YC_[step]
angle = angle_[step]
                # equation parameters
def delta_(x, y):
return int(math.sqrt(x ** 2 + y ** 2))
delta = np.vectorize(delta_)
L = L0 - np.sum(delta(XC, YC)[1:])
N = 200
x0, y0 = 0, 0
                # piecewise construction of the sawtooth line
for i in range(XC.shape[0]):
                    # randomly lengthen or shorten the segment
rand = random.randint(-1, 0)
scale = 0.4
t = np.linspace(0, L // (line_count + rand * scale), N)
xc = XC[i]
yc = YC[i]
X = np.cos(angle[i]) * t + xc + x0
Y = np.sin(angle[i]) * t + yc + y0
X_ = np.around(X)
Y_ = np.around(Y)
x_prev, y_prev = x0, y0
x_first, y_first = 0, 0
for j in range(X_.shape[0]):
x = int(X_[j])
y = int(Y_[j])
if j == 0:
                            # first point of the line
x_first, y_first = x, y
try:
if template[x, y] == 1:
template[x, y] = COLOR_SCALE
x0, y0 = x, y
except IndexError:
break
                    # stitch the line segments together
if i != 0:
                        # equation of the stitching line
k = (y_prev - y_first) / (x_prev - x_first + 1e-06)
b = y_first - k * x_first
X = np.linspace(x_prev, x_first, 20)
Y = k * X + b
X_ = np.around(X)
Y_ = np.around(Y)
for j in range(X_.shape[0]):
x = int(X_[j])
y = int(Y_[j])
try:
if template[x, y] == 1:
template[x, y] = COLOR_SCALE
except IndexError:
break
synthesized_base[n] = [template, pattern_type]
            # for presentation purposes
if save:
path = 'output/test_classes/{}'.format(pattern_type)
try:
os.mkdir(path)
except OSError:
pass
plt.imshow(template, cmap='inferno')
name = '/{}{}.jpg'.format(pattern_type, n)
plt.savefig(path + name)
return pd.DataFrame(synthesized_base, columns=['waferMap', 'failureType'])
@staticmethod
def add_noise(template, pattern_type, lam_poisson=0.2, dilate_time=1):
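        """
        Optionally dilate the pattern (always for scratches), flip a random
        subset of the pattern pixels to Poisson-distributed values clipped to
        {1, 2}, and finish with a morphological closing.
        """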
        # neighbour-based dilation
is_dilate = random.randint(-1, 1)
if is_dilate == 1 or pattern_type == 'scratch':
kernel1 = np.ones((3, 3), np.uint8)
kernel2 = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.uint8)
count_iter = random.randint(1, dilate_time)
template = cv2.dilate(template, kernel2, iterations=count_iter)
template = cv2.morphologyEx(template, cv2.MORPH_CLOSE, kernel2)
        # inject noise
noise_img = template.copy()
mask = np.random.randint(0, 2, size=noise_img.shape).astype(np.bool)
mask[noise_img == 0] = False
r = np.random.poisson(lam=lam_poisson, size=noise_img.shape)
        # normalize the noise magnitude
r[r == 0] = 1
r[r > 2] = 2
noise_img[mask] = r[mask]
        # dilation
# kernel = np.ones((3, 3), np.uint8)
kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.uint8)
noise_img = cv2.morphologyEx(noise_img, cv2.MORPH_CLOSE, kernel)
# if pattern_type != 'scratch':
# noise_img = cv2.erode(noise_img, kernel, iterations=1)
return noise_img
def generator_scratch(self, mode=0, plot=False, line_count=1, add_patterns=[None], is_noised=False):
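        """
        Generate synthesized 'Scratch' wafer maps: sample random sawtooth-line
        parameters (total length, start points, per-segment offsets and
        slopes), build the maps in parallel with sawtooth_line(), then
        optionally add noise and plot a sample grid.
        """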
print('[INFO] Create scratches')
        # number of synthesized maps
N_POINTS = self.number_points // 2
        line_part = 5  # segments per line
        # total length of the line
L0 = np.random.randint(0.2 * self.IMAGE_DIMS[0], 0.45 * self.IMAGE_DIMS[0], size=N_POINTS)
        # X coordinate of the line start
xc = [np.random.randint(0.2 * self.IMAGE_DIMS[0], 0.5 * self.IMAGE_DIMS[0], size=N_POINTS)]
for _ in range(line_part - 1):
            # x offset for the start of the next segment
delta_xc = np.random.randint(0.01 * self.IMAGE_DIMS[0], 0.02 * self.IMAGE_DIMS[0] + 2, size=N_POINTS)
np.random.shuffle(delta_xc)
xc.append(delta_xc)
        # merge into the generator's input format
xc = np.array(xc).T
np.random.shuffle(xc)
        # Y coordinate of the line start
yc = [np.random.randint(0.3 * self.IMAGE_DIMS[0], 0.7 * self.IMAGE_DIMS[0], size=N_POINTS)]
for _ in range(line_part - 1):
            # y offset for the start of the next segment
delta_yc = np.random.randint(0.01 * self.IMAGE_DIMS[0], 0.02 * self.IMAGE_DIMS[0] + 2, size=N_POINTS)
np.random.shuffle(delta_yc)
yc.append(delta_yc)
        # merge into the generator's input format
yc = np.array(yc).T
np.random.shuffle(yc)
        # slope angles for each segment
angle = [np.random.randint(-50, 50, size=N_POINTS) * np.pi / 180]
for _ in range(line_part - 1):
part_angle = np.random.randint(30, 40, size=N_POINTS) * np.pi / 180 * np.sign(angle[0])
angle.append(part_angle)
angle = np.array(angle).T
np.random.shuffle(angle)
df_scratch_curved = None
if mode == 0:
            # run the generator in parallel
n_workers = self.cpu_count
results = Parallel(n_workers)(
delayed(self.sawtooth_line)(xc[i::n_workers], yc[i::n_workers],
L0[i::n_workers], angle[i::n_workers],
pattern_type='Scratch',
line_count=line_count,
add_patterns=add_patterns)
for i in range(n_workers))
df_scratch_curved = results[0]
for i in range(1, len(results)):
df_scratch_curved = pd.concat((df_scratch_curved, results[i]), sort=False)
if is_noised:
df_scratch_curved.waferMap = df_scratch_curved.waferMap.map(lambda wafer_map:
self.add_noise(wafer_map,
pattern_type='scratch',
lam_poisson=0.3))
if plot:
fig, ax = plt.subplots(nrows=10, ncols=10, figsize=(15, 10))
ax = ax.ravel(order='C')
sample_idx = np.random.choice(N_POINTS, 100)
for i, idx in enumerate(sample_idx):
ax[i].imshow(df_scratch_curved.waferMap.values[idx], cmap='inferno')
ax[i].axis('off')
fig.suptitle('Synthesized scratches')
plt.show()
else:
gc.collect()
return df_scratch_curved
def create_rings(self, XC, YC, R_, PHI, N, pattern_type, lam_poisson=1.2, save=False, add_patterns=[None]):
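        """
        Draw ring/sector patterns onto copies of the wafer template from
        polar-grid parameters (centres XC/YC, radii R_, angular ranges PHI,
        point counts N) and return a DataFrame of (waferMap, failureType) rows.
        """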
color_scale = 2
size = XC.shape[0]
synthesized_base = [None] * size
for n in tqdm(range(size)):
            # blank test template
template = deepcopy(self.template_map)
if add_patterns[0]:
for pattern in add_patterns:
for img_pattern in pattern:
template = self.add_pattern(template, img_pattern)
# ring parameters
phi = np.linspace(PHI[n][0], PHI[n][1], N[n])
r = np.linspace(R_[n][0], R_[n][1], N[n])
xc = XC[n]
yc = YC[n]
# synthesize the polar grid
R, Fi = np.meshgrid(r, phi)
X = R * (np.cos(Fi)) + xc
Y = R * (np.sin(Fi)) + yc
X_ = np.around(X)
Y_ = np.around(Y)
# pixel indices on the template
points = []
for i in range(X_.shape[0]):
for j in range(X_.shape[1]):
x = X_[i, j]
y = Y_[i, j]
points.append((x, y))
for idx in points:
i, j = idx
i = int(round(i))
j = int(round(j))
try:
if template[i, j] == 1:
template[i, j] = color_scale
except IndexError:
break
synthesized_base[n] = [template, pattern_type]
# for presentation output
if save:
path = 'output/test_classes/{}'.format(pattern_type)
try:
os.mkdir(path)
except OSError:
pass
plt.imshow(template, cmap='inferno')
name = '/{}{}.jpg'.format(pattern_type, n)
plt.savefig(path + name)
return pd.DataFrame(synthesized_base, columns=['waferMap', 'failureType'])
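# create_rings rasterizes a ring sector by sampling a polar grid and rounding to
# integer pixel coordinates. A standalone sketch of the same transform (the sizes
# and centre below are illustrative assumptions, not values from this project):
# phi = np.linspace(0.0, np.pi, 200)      # angular samples
# r = np.linspace(10.0, 20.0, 200)        # radial samples
# R, Fi = np.meshgrid(r, phi)
# X = np.around(R * np.cos(Fi) + 48)      # xc = 48
# Y = np.around(R * np.sin(Fi) + 48)      # yc = 48
# ...then each (X, Y) pair that lands on a good die (value 1) is set to 2.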
def generator_donut(self, mode=0, plot=False, add_patterns=None, is_noised=False):
print('[INFO] Create donuts')
# number of synthesized maps
N_POINTS = self.number_points
PHI = None
for i in range(4):
# sector start angle
phi1 = np.random.uniform(0 + 95 * i, 30 + 95 * i, size=N_POINTS // 4) * np.pi / 180
# sector end angle
phi2 = np.random.uniform(180 + 90 * i, 360 * (i + 1), size=N_POINTS // 4) * np.pi / 180
phi = np.vstack((phi1, phi2))
# merge into the generator's input format
phi = np.array([[phi[0, j], phi[1, j]] for j in range(phi.shape[1])])
if i == 0:
PHI = phi
else:
PHI = np.vstack((PHI, phi))
# inner circle radius
r1 = np.random.randint(0.15 * self.IMAGE_DIMS[0], 0.3 * self.IMAGE_DIMS[0], size=N_POINTS)
# outer circle radius
r2 = np.random.randint(0.33 * self.IMAGE_DIMS[0], 0.4 * self.IMAGE_DIMS[0], size=N_POINTS)
r = np.vstack((r1, r2))
# merge into the generator's input format
r = np.array([[r[0, i], r[1, i]] for i in range(r.shape[1])])
# X coordinate of the ring center
XC = np.random.randint(0.45 * self.IMAGE_DIMS[0], 0.55 * self.IMAGE_DIMS[0], size=N_POINTS)
# Y coordinate of the ring center
YC = np.random.randint(0.45 * self.IMAGE_DIMS[0], 0.55 * self.IMAGE_DIMS[0], size=N_POINTS)
# intensity (number of sampled points)
N = np.random.randint(200, 210, size=N_POINTS)
df_donut = None
if mode == 0:
# parallel generator
n_workers = self.cpu_count
results = Parallel(n_workers)(
delayed(self.create_rings)(XC[i::n_workers], YC[i::n_workers],
r[i::n_workers], PHI[i::n_workers],
N[i::n_workers], pattern_type='Donut',
add_patterns=add_patterns)
for i in range(n_workers))
df_donut = results[0]
for i in range(1, len(results)):
df_donut = pd.concat((df_donut, results[i]), sort=False)
if is_noised:
df_donut.waferMap = df_donut.waferMap.map(lambda wafer_map:
self.add_noise(wafer_map,
pattern_type='donut',
lam_poisson=0.9,
dilate_time=4))
if plot:
fig, ax = plt.subplots(nrows=10, ncols=10, figsize=(10, 8))
ax = ax.ravel(order='C')
sample_idx = np.random.choice(N_POINTS, 100)
for i, idx in enumerate(sample_idx):
ax[i].imshow(df_donut.waferMap.values[idx], cmap='inferno')
ax[i].axis('off')
fig.suptitle('Synthesized donuts')
plt.show()
else:
gc.collect()
return df_donut
def generator_loc(self, mode=0, plot=False, add_patterns=[None], is_noised=True):
print('[INFO] Create loc')
# number of synthesized maps
N_POINTS = self.number_points
PHI = None
for i in range(4):
# sector start angle
phi1 = np.random.uniform(95 * i, 55 + 90 * i, size=N_POINTS // 4) * np.pi / 180
# sector end angle
phi2 = np.random.uniform(65 + 90 * i, 95 * (i + 1), size=N_POINTS // 4) * np.pi / 180
phi = np.vstack((phi1, phi2))
# merge into the generator's input format
phi = np.array([[phi[0, j], phi[1, j]] for j in range(phi.shape[1])])
if i == 0:
PHI = phi
else:
PHI = np.vstack((PHI, phi))
# inner circle radius
r1 = np.random.randint(0.1 * self.IMAGE_DIMS[0], 0.2 * self.IMAGE_DIMS[0], size=N_POINTS)
# outer circle radius
r2 = np.random.randint(0.2 * self.IMAGE_DIMS[0], 0.25 * self.IMAGE_DIMS[0], size=N_POINTS)
r = np.vstack((r1, r2))
# merge into the generator's input format
r = np.array([[r[0, i], r[1, i]] for i in range(r.shape[1])])
# X coordinate of the ring center
XC = np.random.randint(0.45 * self.IMAGE_DIMS[0], 0.55 * self.IMAGE_DIMS[0], size=N_POINTS)
# Y coordinate of the ring center
YC = np.random.randint(0.45 * self.IMAGE_DIMS[0], 0.55 * self.IMAGE_DIMS[0], size=N_POINTS)
# intensity (number of sampled points)
N = np.random.randint(200, 210, size=N_POINTS)
df_loc = None
if mode == 1:
# generator for presentation output
df_loc = self.create_rings(XC, YC, r, PHI, N, pattern_type='Loc', save=True)
elif mode == 0:
# parallel generator
n_workers = self.cpu_count
results = Parallel(n_workers)(
delayed(self.create_rings)(XC[i::n_workers], YC[i::n_workers],
r[i::n_workers], PHI[i::n_workers],
N[i::n_workers], pattern_type='Loc',
add_patterns=add_patterns)
for i in range(n_workers))
df_loc = results[0]
for i in range(1, len(results)):
df_loc = pd.concat((df_loc, results[i]), sort=False)
if is_noised:
df_loc.waferMap = df_loc.waferMap.map(lambda wafer_map:
self.add_noise(wafer_map,
pattern_type='scratch',
lam_poisson=0.3))
if plot:
fig, ax = plt.subplots(nrows=10, ncols=10, figsize=(10, 8))
ax = ax.ravel(order='C')
sample_idx = np.random.choice(N_POINTS, 100)
for i, idx in enumerate(sample_idx):
ax[i].imshow(df_loc.waferMap.values[idx], cmap='inferno')
ax[i].axis('off')
fig.suptitle('Synthesized Loc patterns')
plt.show()
else:
gc.collect()
return df_loc
def generator_center(self, mode=0, plot=False, add_patterns=[None], is_noised=True):
print('[INFO] Create center')
# number of synthesized maps
N_POINTS = self.number_points
PHI = None
for i in range(4):
# sector start angle
phi1 = np.random.uniform(95 * i, 10 + 90 * i, size=N_POINTS // 4) * np.pi / 180
# sector end angle
phi2 = np.random.uniform(45 + 90 * i, 95 * (i + 1), size=N_POINTS // 4) * np.pi / 180
phi = np.vstack((phi1, phi2))
# merge into the generator's input format
phi = np.array([[phi[0, j], phi[1, j]] for j in range(phi.shape[1])])
if i == 0:
PHI = phi
else:
PHI = np.vstack((PHI, phi))
# inner circle radius
r1 = np.random.randint(0.0 * self.IMAGE_DIMS[0], 0.05 * self.IMAGE_DIMS[0], size=N_POINTS)
# outer circle radius
r2 = np.random.randint(0.12 * self.IMAGE_DIMS[0], 0.23 * self.IMAGE_DIMS[0], size=N_POINTS)
r = np.vstack((r1, r2))
# merge into the generator's input format
r = np.array([[r[0, i], r[1, i]] for i in range(r.shape[1])])
# X coordinate of the ring center
XC = np.random.randint(0.48 * self.IMAGE_DIMS[0], 0.5 * self.IMAGE_DIMS[0], size=N_POINTS)
# Y coordinate of the ring center
YC = np.random.randint(0.48 * self.IMAGE_DIMS[0], 0.5 * self.IMAGE_DIMS[0], size=N_POINTS)
# intensity (number of sampled points)
N = np.random.randint(200, 210, size=N_POINTS)
if mode == 1:
# generator for presentation output
df_center = self.create_rings(XC, YC, r, PHI, N, pattern_type='Center', save=True)
elif mode == 0:
# parallel generator
n_workers = self.cpu_count
results = Parallel(n_workers)(
delayed(self.create_rings)(XC[i::n_workers], YC[i::n_workers],
r[i::n_workers], PHI[i::n_workers],
N[i::n_workers], pattern_type='Center',
add_patterns=add_patterns)
for i in range(n_workers))
df_center = results[0]
for i in range(1, len(results)):
df_center = pd.concat((df_center, results[i]), sort=False)
if is_noised:
df_center.waferMap = df_center.waferMap.map(lambda wafer_map:
self.add_noise(wafer_map,
pattern_type='scratch',
lam_poisson=0.3))
if plot:
fig, ax = plt.subplots(nrows=10, ncols=10, figsize=(10, 8))
ax = ax.ravel(order='C')
sample_idx = np.random.choice(N_POINTS, 100)
for i, idx in enumerate(sample_idx):
ax[i].imshow(df_center.waferMap.values[idx], cmap='inferno')
ax[i].axis('off')
fig.suptitle('Synthesized Center patterns')
plt.show()
else:
gc.collect()
return df_center
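# generator_donut, generator_loc, generator_center, generator_edge_ring and
# generator_edge_loc all delegate to create_rings; they differ only in the ranges
# sampled for the sector angles (PHI), the inner/outer radii (r) and the centre
# coordinates (XC, YC). For example, Center uses an inner radius near zero while
# Donut keeps it well away from the centre, and the Edge-* patterns pin the radii
# to the wafer rim.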
def generator_edge_ring(self, mode=0, plot=False, add_patterns=[None], is_noised=True):
print('[INFO] Create edge_ring')
# number of synthesized maps
N_POINTS = self.number_points
PHI = None
for i in range(4):
# sector start angle
phi1 = np.random.uniform(0 + 90 * i, 30 + 90 * i, size=N_POINTS // 4) * np.pi / 180
# sector end angle
phi2 = np.random.uniform(320 + 90 * i,
360 * (i + 1), size=N_POINTS // 4) * np.pi / 180
phi = np.vstack((phi1, phi2))
# merge into the generator's input format
phi = np.array([[phi[0, j], phi[1, j]] for j in range(phi.shape[1])])
if i == 0:
PHI = phi
else:
PHI = np.vstack((PHI, phi))
center = 0.5 * self.IMAGE_DIMS[0]
r1 = np.random.randint(center - 4, center - 3, size=N_POINTS)
r2 = np.random.randint(center, center + 1, size=N_POINTS)
r = np.vstack((r1, r2))
r = np.array([[r[0, i], r[1, i]] for i in range(r.shape[1])])
# X coordinate of the ring center
XC = np.random.randint(center - 2, center, size=N_POINTS)
# Y coordinate of the ring center
YC = np.random.randint(center - 2, center, size=N_POINTS)
# intensity (number of sampled points)
N = np.random.randint(200, 210, size=N_POINTS)
df_edge_ring = None
if mode == 1:
# generator for presentation output
df_edge_ring = self.create_rings(XC, YC, r, PHI, N, pattern_type='Edge-Ring', save=True)
elif mode == 0:
# parallel generator
n_workers = self.cpu_count
results = Parallel(n_workers)(
delayed(self.create_rings)(XC[i::n_workers], YC[i::n_workers],
r[i::n_workers], PHI[i::n_workers],
N[i::n_workers], pattern_type='Edge-Ring',
add_patterns=add_patterns)
for i in range(n_workers))
df_edge_ring = results[0]
for i in range(1, len(results)):
df_edge_ring = pd.concat((df_edge_ring, results[i]), sort=False)
if is_noised:
df_edge_ring.waferMap = df_edge_ring.waferMap.map(lambda wafer_map:
self.add_noise(wafer_map,
pattern_type='scratch',
lam_poisson=0.3))
if plot:
fig, ax = plt.subplots(nrows=10, ncols=10, figsize=(10, 8))
ax = ax.ravel(order='C')
sample_idx = np.random.choice(N_POINTS, 100)
for i, idx in enumerate(sample_idx):
ax[i].imshow(df_edge_ring.waferMap.values[idx], cmap='inferno')
ax[i].axis('off')
fig.suptitle('Synthesized Edge-Ring patterns')
plt.show()
else:
gc.collect()
return df_edge_ring
def generator_edge_loc(self, mode=0, plot=False, add_patterns=[None], is_noised=True):
print('[INFO] Create edge_loc')
# number of synthesized maps
N_POINTS = self.number_points
PHI = None
for i in range(4):
# sector start angle
phi1 = np.random.uniform(15 + 90 * i, 25 + 90 * i, size=N_POINTS // 4) * np.pi / 180
# sector end angle
phi2 = np.random.uniform(55 + 90 * i, 115 + 90 * i, size=N_POINTS // 4) * np.pi / 180
phi = np.vstack((phi1, phi2))
# merge into the generator's input format
phi = np.array([[phi[0, j], phi[1, j]] for j in range(phi.shape[1])])
if i == 0:
PHI = phi
else:
PHI = np.vstack((PHI, phi))
center = 0.5 * self.IMAGE_DIMS[0]
r1 = np.random.randint(center - 5, center - 3, size=N_POINTS)
r2 = np.random.randint(center, center + 1, size=N_POINTS)
r = np.vstack((r1, r2))
r = np.array([[r[0, i], r[1, i]] for i in range(r.shape[1])])
# X coordinate of the ring center
XC = np.random.randint(center - 2, center - 1, size=N_POINTS)
# Y coordinate of the ring center
YC = np.random.randint(center - 2, center - 1, size=N_POINTS)
# intensity (number of sampled points)
N = np.random.randint(200, 210, size=N_POINTS)
df_edge_loc = None
if mode == 1:
# generator for presentation output
df_edge_loc = self.create_rings(XC, YC, r, PHI, N, pattern_type='Edge-Loc', save=True)
elif mode == 0:
# parallel generator
n_workers = self.cpu_count
results = Parallel(n_workers)(
delayed(self.create_rings)(XC[i::n_workers], YC[i::n_workers],
r[i::n_workers], PHI[i::n_workers],
N[i::n_workers], pattern_type='Edge-Loc',
add_patterns=add_patterns)
for i in range(n_workers))
df_edge_loc = results[0]
for i in range(1, len(results)):
df_edge_loc = pd.concat((df_edge_loc, results[i]), sort=False)
if is_noised:
df_edge_loc.waferMap = df_edge_loc.waferMap.map(lambda wafer_map:
self.add_noise(wafer_map,
pattern_type='scratch',
lam_poisson=0.3))
if plot:
fig, ax = plt.subplots(nrows=10, ncols=10, figsize=(10, 8))
ax = ax.ravel(order='C')
sample_idx = np.random.choice(N_POINTS, 100)
for i, idx in enumerate(sample_idx):
ax[i].imshow(df_edge_loc.waferMap.values[idx], cmap='inferno')
ax[i].axis('off')
fig.suptitle('Synthesized Edge-Loc patterns')
plt.show()
else:
gc.collect()
return df_edge_loc
def create_near_full(self, capacity, pattern_type, lam_poisson=1.2, save=False):
synthesized_base = [None] * capacity
for step in range(capacity):
# blank wafer template
template = deepcopy(self.template_map)
# add noise
noise_img = deepcopy(template)
mask = np.random.randint(0, 2, size=noise_img.shape).astype(bool)
mask[noise_img == 0] = False
r = np.random.poisson(lam=lam_poisson, size=noise_img.shape)
# normalize the noise
# r = np.around(r//np.max(r))
r[r == 0] = 1
r[r == 1] = 2
r[r > 2] = 1
noise_img[mask] = r[mask]
# dilate with a morphological kernel
kernel = np.ones((3, 3), np.uint8)
kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.uint8)
noise_img = cv2.morphologyEx(noise_img, cv2.MORPH_CLOSE, kernel)
noise_img = cv2.erode(noise_img, kernel, iterations=1)
synthesized_base[step] = [noise_img, pattern_type]
# for presentation output
if save:
path = 'output/test_classes/{}'.format(pattern_type)
try:
os.mkdir(path)
except OSError:
pass
plt.imshow(noise_img, cmap='inferno')
name = '/{}{}.jpg'.format(pattern_type, step)
plt.savefig(path + name)
return pd.DataFrame(synthesized_base, columns=['waferMap', 'failureType'])
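# create_near_full and create_random (below) share the same masking scheme and
# differ only in the Poisson lambda and the value remapping: Near-full maps draws
# of 1 to the defect value 2 and draws above 2 back to 1, while the random/none
# pattern maps draws above 2 to 2 and leaves 1 as a good die.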
def generator_near_full(self, plot=False):
print('[INFO] Create near_full')
# number of synthesized maps
N_POINTS = self.number_points
df_near_full = self.create_near_full(N_POINTS, pattern_type='Near-full', lam_poisson=1.3)
if plot:
fig, ax = plt.subplots(nrows=10, ncols=10, figsize=(10, 8))
ax = ax.ravel(order='C')
sample_idx = np.random.choice(N_POINTS, 100)
for i, idx in enumerate(sample_idx):
ax[i].imshow(df_near_full.waferMap.values[idx], cmap='inferno')
ax[i].axis('off')
fig.suptitle('Synthesized Near-full patterns')
plt.show()
else:
gc.collect()
return df_near_full
def create_random(self, capacity, pattern_type, lam_poisson=1.2, save=False):
synthesized_base = [None] * capacity
for step in tqdm(range(capacity)):
# blank wafer template
template = deepcopy(self.template_map)
# add noise
noise_img = deepcopy(template)
mask = np.random.randint(0, 2, size=noise_img.shape).astype(bool)
mask[noise_img == 0] = False
r = np.random.poisson(lam=lam_poisson, size=noise_img.shape)
# normalize the noise
# r = np.around(r//np.max(r))
r[r == 0] = 1
r[r > 2] = 2
noise_img[mask] = r[mask]
# dilate with a morphological kernel
kernel = np.ones((3, 3), np.uint8)
kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.uint8)
noise_img = cv2.morphologyEx(noise_img, cv2.MORPH_CLOSE, kernel)
noise_img = cv2.erode(noise_img, kernel, iterations=1)
synthesized_base[step] = [noise_img, pattern_type]
# for presentation output
if save:
path = 'output/test_classes/{}'.format(pattern_type)
try:
os.mkdir(path)
except OSError:
pass
plt.imshow(noise_img, cmap='inferno')
name = '/{}{}.jpg'.format(pattern_type, step)
plt.savefig(path + name)
return pd.DataFrame(synthesized_base, columns=['waferMap', 'failureType'])
def generator_random(self, plot=False):
print('[INFO] Create random')
# number of synthesized maps
N_POINTS = self.number_points
df_random = self.create_random(N_POINTS, pattern_type='none', lam_poisson=2.1)
if plot:
fig, ax = plt.subplots(nrows=10, ncols=10, figsize=(10, 8))
ax = ax.ravel(order='C')
sample_idx = np.random.choice(N_POINTS, 100)
for i, idx in enumerate(sample_idx):
ax[i].imshow(df_random.waferMap.values[idx], cmap='inferno')
ax[i].axis('off')
fig.suptitle('Synthesized random (none) patterns')
plt.show()
else:
gc.collect()
return df_random
def create_synthesized_database(self, classes, is_noised):
df_scratch_curved = [self.generator_scratch(mode=0, plot=False, line_count=i+1,
add_patterns=[None], is_noised=is_noised)
for i in range(2)]
df_scratch = pd.concat(df_scratch_curved, ignore_index=True)
df_donut = self.generator_donut(mode=0, plot=False, add_patterns=[None], is_noised=is_noised)
df_loc = self.generator_loc(mode=0, plot=False, add_patterns=[None], is_noised=is_noised)
df_center = self.generator_center(mode=0, plot=False, add_patterns=[None], is_noised=is_noised)
df_edge_ring = self.generator_edge_ring(mode=0, plot=False, add_patterns=[None], is_noised=is_noised)
df_edge_loc = self.generator_edge_loc(mode=0, plot=False, add_patterns=[None], is_noised=is_noised)
df_random = self.generator_random(plot=False)
data = [df_center, df_donut, df_loc,
df_scratch, df_edge_ring, df_edge_loc,
df_random
]
df = pd.concat(data[:classes], sort=False)
mapping_type = {'Center': 0, 'Donut': 1, 'Loc': 2,
'Scratch': 3, 'Edge-Ring': 4, 'Edge-Loc': 5,
'none': 6
}
mapping_type = dict(list(mapping_type.items())[:classes])
df['failureNum'] = df.failureType
df = df.replace({'failureNum': mapping_type})
df.to_pickle('input/' + self.synthesized_path_name)
return True
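# Example usage (a sketch; these methods belong to the synthesizer class defined
# earlier in this file -- the class name and constructor arguments below are
# hypothetical placeholders, not values from the source):
# synthesizer = WaferMapSynthesizer(...)  # hypothetical name for this class
# synthesizer.create_synthesized_database(classes=7, is_noised=True)
# df = pd.read_pickle('input/' + synthesizer.synthesized_path_name)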
class TrainingDatabaseCreator(object):
"""
"""
def __init__(self, database_only_patterns_path):
self.full_database_path = 'input/LSWMD.pkl'
self.database_only_patterns_path = 'input/' + database_only_patterns_path
self.IMAGE_DIMS = (1, 96, 96)
def read_full_data(self, synthesized_path_name=None):
"""
:param synthesized_path_name:
:return:
"""
print('[INFO] Reading databases...')
start_time = time.time()
try:
full_real_database = pd.read_pickle(self.database_only_patterns_path)
except FileNotFoundError:
print('[INFO] Prepared full database not found\n'
'Loading full database...\n'
f'Creating {self.database_only_patterns_path} database')
full_real_database = pd.read_pickle(self.full_database_path)
mapping_type = {'Center': 0, 'Donut': 1, 'Loc': 2,
'Scratch': 3, 'Edge-Ring': 4, 'Edge-Loc': 5,
'none': 6, 'Near-full': 7, 'Random': 8}
full_real_database['failureNum'] = full_real_database.failureType
full_real_database = full_real_database.replace({'failureNum': mapping_type})
full_real_database = full_real_database[(full_real_database['failureNum'] >= 0) &
(full_real_database['failureNum'] <= 5)]
full_real_database = full_real_database.reset_index()
full_real_database = full_real_database.drop(labels=['dieSize', 'lotName', 'waferIndex',
'trianTestLabel', 'index'], axis=1)
######################
# Get fixed size of maps
out_map = []
out_class = []
dim_size = 40
for index, row in full_real_database.iterrows():
waf_map = row.waferMap
waf_type = row.failureType
if waf_map.shape[0] > dim_size and waf_map.shape[1] > dim_size:
out_map += [waf_map]
out_class += [waf_type[0][0]]
database = pd.DataFrame(data=np.vstack((out_map, out_class)).T, columns=['waferMap', 'failureType'])
database['failureNum'] = database.failureType
database = database.replace({'failureNum': mapping_type})
full_real_database = database
database.to_pickle(self.database_only_patterns_path)
synthesized_database = None
if synthesized_path_name:
synthesized_database = pd.read_pickle('input/' + synthesized_path_name)
else:
print('[INFO] Synthesized database not found')
print('reserved time: {:.2f}s'.format(time.time() - start_time))
return full_real_database, synthesized_database
def make_training_database(self, synthesized_path_name, failure_types_ratio):
full_real_database, synthesized_database = self.read_full_data(synthesized_path_name)
print('[INFO] Making train/test/val databases...')
start_time = time.time()
try:
synthesized_database['failureType'] = synthesized_database['failureType'].map(lambda label: label)
except TypeError:
print('Please, enter a path of the synthesized database')
return None
full_real_database['waferMap'] = full_real_database['waferMap'].map(lambda waf_map:
cv2.resize(waf_map,
dsize=(self.IMAGE_DIMS[1],
self.IMAGE_DIMS[1]),
interpolation=cv2.INTER_NEAREST))
training_database = synthesized_database
testing_database = None
for failure_type in failure_types_ratio:
train_real, test_real = train_test_split(full_real_database[full_real_database.failureType == failure_type],
train_size=failure_types_ratio[failure_type],
random_state=42,
shuffle=True)
training_database = | pd.concat([training_database, train_real], sort=False) | pandas.concat |
import numpy as np
import pandas as pd
import indicators
import config
import util
import sys
from pathlib import Path
def get_algo_dataset(choose_set_num: int):
"""run_set = ['goldman', 'index', '^BVSP', '^TWII', '^IXIC', 'index_sampled']
Returns df_list, date_range, trend_list, stocks
"""
# Do not change run_set order. The order is hardcoded into below code
run_set = ['goldman', 'index', '^BVSP', '^TWII', '^IXIC', 'index_sampled']
choose_set = run_set[choose_set_num]
df_list = []
date_range = []
trend_list = []
stocks = []
### For GS stocks: 'GGSIX', 'GOIIX', 'GIPIX'
if choose_set == run_set[0]:
# Must be same order
stocks = ['GGSIX', 'GOIIX', 'GIPIX']
folder = ['growth', 'growth_income', 'balanced']
for i, stock in enumerate(stocks):
df=pd.read_csv('data/goldman/portfolio/{}/{}.csv'.format(folder[i],stock), usecols=config.column_names, parse_dates=['Date'])
df = df[df['Close'] > 0].reset_index(drop=True)
df['returns'] = indicators.day_gain(df, 'Close').dropna()
df_list.append(df)
start = '1/1/2016'
end = '31/12/2018'
# date_range = df_list[0][(df_list[0]['Date'] >= df_list[1].iloc[0]['Date']) & (df_list[0]['Date'] >= df_list[2].iloc[0]['Date'])]['Date'].tolist()
date_range = remove_uncommon_dates(df_list)
trend_list = util.get_trend_list(stocks, df_list, start=start, end=end)
### For Index stocks: '^BVSP', '^TWII', '^IXIC'
elif choose_set == run_set[1]:
stocks = ['^BVSP', '^TWII', '^IXIC']
high_risk_df = pd.read_csv('data/indexes/{}.csv'.format('^BVSP'), usecols=config.column_names, parse_dates=['Date'])
high_risk_df = high_risk_df[high_risk_df['Close'] > 0].reset_index(drop=True)
high_risk_df['returns'] = indicators.day_gain(high_risk_df, 'Close').dropna()
med_risk_df = pd.read_csv('data/indexes/{}.csv'.format('^TWII'), usecols=config.column_names, parse_dates=['Date'])
med_risk_df = med_risk_df[med_risk_df['Close'] > 0].reset_index(drop=True)
med_risk_df['returns'] = indicators.day_gain(med_risk_df, 'Close').dropna()
low_risk_df = pd.read_csv('data/indexes/{}.csv'.format('^IXIC'), parse_dates=['Date'])
# IXIC dates are reversed
low_risk_df = low_risk_df.reindex(index=low_risk_df.index[::-1])
low_risk_df = low_risk_df[low_risk_df['Close'] > 0].reset_index(drop=True)
low_risk_df['returns'] = indicators.day_gain(low_risk_df, 'Close').dropna()
df_list = [high_risk_df, med_risk_df, low_risk_df]
start = '1/1/2014'
end = '31/12/2018'
# date_range = high_risk_df[(high_risk_df['Date'] >= med_risk_df.iloc[0]['Date']) & (high_risk_df['Date'] >= low_risk_df.iloc[0]['Date'])]['Date'].tolist()
date_range = remove_uncommon_dates(df_list)
trend_list = util.get_trend_list(stocks, df_list, start=start, end=end)
elif choose_set == run_set[2] or choose_set == run_set[3] or choose_set == run_set[4]:
stocks =[]
folder =''
if choose_set == run_set[2]:
stocks = ['EQTL3.SA', 'ITSA4.SA', 'PETR3.SA']
folder = '^BVSP'
elif choose_set==run_set[3]:
stocks = ['1326.TW', '2882.TW', '3008.TW']
folder = '^TWII'
elif choose_set==run_set[4]:
stocks = ['TSLA', 'IBKC', 'FEYE']
folder = '^IXIC'
else:
print('An error occured in fetching the data for algo stocks.')
for stock in stocks:
df=pd.read_csv('data/algo/{}/{}.csv'.format(folder,stock), usecols=config.column_names, parse_dates=['Date'])
df = df[df['Close'] > 0].reset_index(drop=True)
df['returns'] = indicators.day_gain(df, 'Close').dropna()
df_list.append(df)
start = '1/1/2014'
end = '31/12/2018'
date_range = remove_uncommon_dates(df_list)
trend_list = util.get_trend_list(stocks, df_list, start=start, end=end)
elif choose_set == run_set[5]:
stocks =['^BVSP', '^TWII', '^IXIC']
for stock in stocks:
df=pd.read_csv('data/algo/{}/daily_price.csv'.format(stock), parse_dates=['Date'])
df = df[df['Close'] > 0].reset_index(drop=True)
df['returns'] = indicators.day_gain(df, 'Close').dropna()
df_list.append(df)
start = '1/1/2014'
end = '31/12/2018'
date_range = remove_uncommon_dates(df_list)
trend_list = util.get_trend_list(stocks, df_list, start=start, end=end)
return df_list, date_range, trend_list, stocks
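# Example (a sketch): choose_set_num=1 loads the '^BVSP', '^TWII', '^IXIC' index
# set; the returned lists are aligned so that df_list[i] belongs to stocks[i]:
# df_list, date_range, trend_list, stocks = get_algo_dataset(1)
# assert len(df_list) == len(stocks) == 3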
def get_algo_results(choose_set_num: int, asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list, cal_avg_nav=False):
"""Returns the change list and final asset value
"""
change_list = []
average_asset = 0
if cal_avg_nav:
if choose_set_num == 0:
average_asset, asset_list, portfolio_comp = util.cal_portfolio_changed_nav(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
[8.0, 6.0, 12.0], [9.0, 5.0, 9.0], [6.0, 12.0, 6.0], [0.9712034471256101, -1.6709072749507035, -1.0777099909032646], [-3.4145406491989023, -0.18272123074956848, -0.7245604433339186], 0.0816132948369838)
# [8.0, 8.0, 4.0], [5.0, 6.0, 5.0], [5.0, 2.0, 3.0], [0.22948733470032123, 0.8909251765940478, -0.20656673058505381], [-1.7417846430478365, -0.4628863373977188, 1.5419043896500977], 0.14266550931364091)
# [21.0, 6.0, 5.0], [2.0, 2.0, 6.0], [27.0, 12.0, 3.0], [3.125115822639779, -2.561089882241202, -1.4940972093691949], [1.2063367792987396, 1.4663555035726752, -0.2846560129041551], 0.1614246940280476)
elif choose_set_num == 1:
average_asset, asset_list, portfolio_comp = util.cal_portfolio_changed_nav(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
# [8.0, 6.0, 12.0], [9.0, 5.0, 9.0], [6.0, 12.0, 6.0], [0.9712034471256101, -1.6709072749507035, -1.0777099909032646], [-3.4145406491989023, -0.18272123074956848, -0.7245604433339186], 0.0816132948369838)
# [5.0, 5.0, 6.0], [5.0, 6.0, 6.0], [19.0, 5.0, 8.0], [1.8954915289833882, -1.450482294216655, 1.125418440357023], [-2.3676311336976132, -1.8970317071693157, 0.23699516374694385], 0.046795990258734835)
[8.0, 14.0, 11.0], [11.0, 11.0, 2.0], [15.0, 10.0, 2.0], [1.363647435463774, 2.716953337278016, -4.324164482875698], [-1.7062595953617727, 2.5105760118208957, -4.060094673509836], 0.07240419552333409)
elif choose_set_num == 2:
average_asset, asset_list, portfolio_comp = util.cal_portfolio_changed_nav(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
# [4.0, 2.0, 4.0], [4.0, 8.0, 9.0], [6.0, 4.0, 7.0], [0.6078976284270344, 1.2577097768694967, 2.0213163271738006], [-2.566918900257593, 2.90468608230902, -1.7097040021899894], 0.07797085783765784)
[3.0, 3.0, 13.0], [11.0, 5.0, 9.0], [8.0, 4.0, 18.0], [0.06083023158629253, 0.5601483772918827, 1.9569019466459423], [-1.3881334364246258, 2.8163651325079524, 0.9492765355184316], 0.15511606897450375)
elif choose_set_num == 3:
average_asset, asset_list, portfolio_comp = util.cal_portfolio_changed_nav(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
[3.0, 9.0, 4.0], [4.0, 14.0, 3.0], [2.0, 2.0, 16.0], [0.30059198706758106, 1.0952845039110184, 1.8392867588452613], [2.771352403174757, -1.3669589385046343, -2.3406274217770866], 0.17345428438145236)
elif choose_set_num == 4:
average_asset, asset_list, portfolio_comp = util.cal_portfolio_changed_nav(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
# [9.0, 7.0, 9.0], [4.0, 3.0, 7.0], [6.0, 5.0, 15.0], [0.9351583394555885, 1.3754760765507819, 2.348134831028588], [-2.471478593919233, 1.379869639191209, 4.95188889034387], 0.1444277817979811)
# [8.0, 11.0, 2.0], [6.0, 8.0, 6.0], [7.0, 8.0, 12.0], [1.1255518400058317, -0.36346414388153225, -1.0247284676654485], [-0.6274220138552453, -1.1083765565671055, 0.00449200835519481], 0.13718457807344167)
[2.0, 5.0, 11.0], [4.0, 2.0, 2.0], [7.0, 5.0, 5.0], [0.2774502065258735, 0.16677941009065034, -0.45385907412444926], [-0.2098008442952385, 1.289022800463935, 2.003346238448586], 0.15779763053682244)
elif choose_set_num == 5:
average_asset, asset_list, portfolio_comp = util.cal_portfolio_changed_nav(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
# [7.0, 12.0, 3.0], [2.0, 7.0, 2.0], [13.0, 3.0, 8.0], [2.522702769828708, -0.5707216899389504, 0.8348229423350395], [-1.7493395408023145, 1.0817636863501934, 0.8232680695157204], 0.1963583867900387)
[4.0, 6.0, 3.0], [2.0, 4.0, 7.0], [14.0, 2.0, 5.0], [1.3929077534652725, 0.18393055682065484, 2.6440755858307075], [-1.601189152927202, 1.3377505947800103, -1.9787536808104849], 0.13726920065461523)
else:
print('ERROR! Wrong choose_set_num')
return average_asset, asset_list, portfolio_comp
else:
if choose_set_num == 0:
change_list, asset_list, portfolio_comp = util.cal_portfolio_comp_fitness(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
[8.0, 6.0, 12.0], [9.0, 5.0, 9.0], [6.0, 12.0, 6.0], [0.9712034471256101, -1.6709072749507035, -1.0777099909032646], [-3.4145406491989023, -0.18272123074956848, -0.7245604433339186], 0.0816132948369838)
# [8.0, 8.0, 4.0], [5.0, 6.0, 5.0], [5.0, 2.0, 3.0], [0.22948733470032123, 0.8909251765940478, -0.20656673058505381], [-1.7417846430478365, -0.4628863373977188, 1.5419043896500977], 0.14266550931364091)
# [21.0, 6.0, 5.0], [2.0, 2.0, 6.0], [27.0, 12.0, 3.0], [3.125115822639779, -2.561089882241202, -1.4940972093691949], [1.2063367792987396, 1.4663555035726752, -0.2846560129041551], 0.1614246940280476)
elif choose_set_num == 1:
change_list, asset_list, portfolio_comp = util.cal_portfolio_comp_fitness(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
# [8.0, 6.0, 12.0], [9.0, 5.0, 9.0], [6.0, 12.0, 6.0], [0.9712034471256101, -1.6709072749507035, -1.0777099909032646], [-3.4145406491989023, -0.18272123074956848, -0.7245604433339186], 0.0816132948369838)
# [5.0, 5.0, 6.0], [5.0, 6.0, 6.0], [19.0, 5.0, 8.0], [1.8954915289833882, -1.450482294216655, 1.125418440357023], [-2.3676311336976132, -1.8970317071693157, 0.23699516374694385], 0.046795990258734835)
[8.0, 14.0, 11.0], [11.0, 11.0, 2.0], [15.0, 10.0, 2.0], [1.363647435463774, 2.716953337278016, -4.324164482875698], [-1.7062595953617727, 2.5105760118208957, -4.060094673509836], 0.07240419552333409)
elif choose_set_num == 2:
change_list, asset_list, portfolio_comp = util.cal_portfolio_comp_fitness(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
# [4.0, 2.0, 4.0], [4.0, 8.0, 9.0], [6.0, 4.0, 7.0], [0.6078976284270344, 1.2577097768694967, 2.0213163271738006], [-2.566918900257593, 2.90468608230902, -1.7097040021899894], 0.07797085783765784)
[3.0, 3.0, 13.0], [11.0, 5.0, 9.0], [8.0, 4.0, 18.0], [0.06083023158629253, 0.5601483772918827, 1.9569019466459423], [-1.3881334364246258, 2.8163651325079524, 0.9492765355184316], 0.15511606897450375)
elif choose_set_num == 3:
change_list, asset_list, portfolio_comp = util.cal_portfolio_comp_fitness(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
[3.0, 9.0, 4.0], [4.0, 14.0, 3.0], [2.0, 2.0, 16.0], [0.30059198706758106, 1.0952845039110184, 1.8392867588452613], [2.771352403174757, -1.3669589385046343, -2.3406274217770866], 0.17345428438145236)
elif choose_set_num == 4:
change_list, asset_list, portfolio_comp = util.cal_portfolio_comp_fitness(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
# [9.0, 7.0, 9.0], [4.0, 3.0, 7.0], [6.0, 5.0, 15.0], [0.9351583394555885, 1.3754760765507819, 2.348134831028588], [-2.471478593919233, 1.379869639191209, 4.95188889034387], 0.1444277817979811)
# [8.0, 11.0, 2.0], [6.0, 8.0, 6.0], [7.0, 8.0, 12.0], [1.1255518400058317, -0.36346414388153225, -1.0247284676654485], [-0.6274220138552453, -1.1083765565671055, 0.00449200835519481], 0.13718457807344167)
[2.0, 5.0, 11.0], [4.0, 2.0, 2.0], [7.0, 5.0, 5.0], [0.2774502065258735, 0.16677941009065034, -0.45385907412444926], [-0.2098008442952385, 1.289022800463935, 2.003346238448586], 0.15779763053682244)
elif choose_set_num == 5:
change_list, asset_list, portfolio_comp = util.cal_portfolio_comp_fitness(asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list,
# [7.0, 12.0, 3.0], [2.0, 7.0, 2.0], [13.0, 3.0, 8.0], [2.522702769828708, -0.5707216899389504, 0.8348229423350395], [-1.7493395408023145, 1.0817636863501934, 0.8232680695157204], 0.1963583867900387)
[4.0, 6.0, 3.0], [2.0, 4.0, 7.0], [14.0, 2.0, 5.0], [1.3929077534652725, 0.18393055682065484, 2.6440755858307075], [-1.601189152927202, 1.3377505947800103, -1.9787536808104849], 0.13726920065461523)
else:
print('ERROR! Wrong choose_set_num')
return change_list, asset_list, portfolio_comp
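# The long numeric literals passed above are pre-tuned hyperparameters for
# util.cal_portfolio_comp_fitness / util.cal_portfolio_changed_nav (window
# lengths, trend thresholds and a rate-like scalar); they appear to come from an
# earlier optimisation run -- this is an inference, the source does not document
# them.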
def gen_algo_data(run_set: list, choose_set_num: int, save_algo_data=False, save_passive=False, save_sub_folder='', is_rl_data=False, base_rates=[], portfolio_comp=[]):
df_list, date_range, trend_list, stocks = util.get_algo_dataset(choose_set_num)
# this is an afterthought
if base_rates == []:
base_rates = [0.2, 0.2, 0.2]
if portfolio_comp == []:
portfolio_comp = [base_rates[i] + [0.4/3, 0.4/3, 0.4/3][i] for i in range(len(base_rates))]
asset_list = [100000, 100000, 100000]
change_list = []
# print('Initial portfolio composition: {}'.format(portfolio_comp))
change_list,_,_ = util.get_algo_results(choose_set_num, asset_list, base_rates, portfolio_comp, df_list, date_range, trend_list)
print('Reallocated {} times'.format(len([i for i in change_list if i[0]])))
# print([i[1] for i in change_list if i[0]])
nav_daily_dates_list = []
nav_daily_composition_list = [[], [], []]
nav_daily_net_list = []
daily_price_list = []
asset_list = [100000, 100000, 100000]
nav_daily_adjust_list = [i[0] for i in change_list]
j = 0
last_trade_date = date_range[0]
for date in date_range:
# Generate daily NAV value for visualisation
high_risk_date = df_list[0][df_list[0]['Date'] == date]
med_risk_date = df_list[1][df_list[1]['Date'] == date]
low_risk_date = df_list[2][df_list[2]['Date'] == date]
if not (high_risk_date.empty or med_risk_date.empty or low_risk_date.empty):
current_nav_list = []
if not change_list[j][0]:
for i in range(len(portfolio_comp)):
previous_close_price = df_list[i][df_list[i]['Date'] == last_trade_date]['Close'].values[0]
current_close_price = df_list[i][df_list[i]['Date'] == date]['Close'].values[0]
current_nav_list.append(asset_list[i] * current_close_price / previous_close_price)
else:
for i in range(len(portfolio_comp)):
asset_list[i] = change_list[j][1][i]
current_nav_list.append(asset_list[i])
last_trade_date = change_list[j][2]
nav_daily_dates_list.append(date)
for i in range(len(portfolio_comp)):
nav_daily_composition_list[i].append(current_nav_list[i])
daily_price_list.append(sum(current_nav_list)/300000 *100)
nav_daily_net_list.append(sum(current_nav_list))
j+=1
# Note that we are using the Laspeyres Price Index for calculation
daily_price_df = | pd.DataFrame({'Date': nav_daily_dates_list, 'Close': daily_price_list}) | pandas.DataFrame |
def telco_churn(quantile=.5):
'''Returns dataset in format x, [y1, y2]. This dataset
is useful for demonstrating multi-output model or for
experimenting with reduction strategy creation.
The data is from hyperparameter optimization experiment with
Kaggle telco churn dataset.
x: features
y1: val_loss
y2: val_f1score
quantile is for transforming the otherwise continuous y variables into
labels so that higher value is stronger. If set to 0 then original
continuous will be returned.'''
import wrangle
import pandas as pd
df = pd.read_csv('https://raw.githubusercontent.com/autonomio/examples/master/telco_churn/telco_churn_for_sensitivity.csv')
df = df.drop(['val_acc', 'loss', 'f1score', 'acc', 'round_epochs'], axis=1)
for col in df.iloc[:, 2:].columns:
df = wrangle.col_to_multilabel(df, col)
df = wrangle.df_rename_cols(df)
if quantile > 0:
y1 = (df.C0 < df.C0.quantile(quantile)).astype(int).values
y2 = (df.C1 > df.C1.quantile(quantile)).astype(int).values
else:
y1 = df.C0.values
y2 = df.C1.values
x = df.drop(['C0', 'C1'], axis=1).values
return x, [y1, y2]
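# Example (a sketch): with the default quantile, y1/y2 are binary labels marking
# hyperparameter runs in the better half by val_loss and val_f1score respectively:
# x, (y1, y2) = telco_churn(quantile=.5)
# print(x.shape, y1.mean(), y2.mean())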
def icu_mortality(samples=None):
import pandas as pd
base = 'https://raw.githubusercontent.com/autonomio/datasets/master/autonomio-datasets/'
df = pd.read_csv(base + 'icu_mortality.csv')
df = df.dropna(thresh=3580, axis=1)
df = df.dropna()
df = df.sample(frac=1).head(samples)
y = df['hospitalmortality'].astype(int).values
x = df.drop('hospitalmortality', axis=1).values
return x, y
def titanic():
import pandas as pd
base = 'https://raw.githubusercontent.com/autonomio/datasets/master/autonomio-datasets/'
df = pd.read_csv(base + 'titanic.csv')
y = df.survived.values
x = df[['age', 'sibsp', 'parch']]
cols = ['class', 'embark_town', 'who', 'deck', 'sex']
for col in cols:
x = pd.merge(x,
pd.get_dummies(df[col]),
left_index=True,
right_index=True)
x = x.values
print('BE CAREFUL, this dataset has nan values.')
return x, y
def iris():
import pandas as pd
from tensorflow.keras.utils import to_categorical
base = 'https://raw.githubusercontent.com/autonomio/datasets/master/autonomio-datasets/'
df = | pd.read_csv(base + 'iris.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
from scipy.stats import entropy
import logging
from itertools import combinations
logger = logging.getLogger(__name__)
def filter_opinions(perspectives, opinions):
"""Return opinions for selected perspectives
Parameters:
perspectives : list of strings
list of strings containing names of perspectives to return opinions
for
opinions : dict of opinions (pandas DataFrames)
Returns:
dict of opinions (pandas DataFrames)
Dictionary containing opinions for the selected perspectives
"""
filtered = {}
for persp in perspectives:
filtered[persp] = opinions[persp].copy()
return filtered
def contrastive_opinions(query, topics, opinions, nks):
"""Returns a DataFrame containing contrastive opinions for the query.
Implements contrastive opinion modeling as specified in [Fang et al., 2012]
equation 1. The resulting probability distributions over words are
normalized, in order to facilitate mutual comparisons.
Example usage:
co = contrastive_opinions('mishandeling', topics, opinions, nks)
print(print_topic(co[0]))
Parameters:
query : str
The word contrastive opinions should be calculated for.
topics : pandas DataFrame
DataFrame containg the topics
opinions : dict of pandas DataFrames
Dictionary containing a pandas DataFrame for every perspective
nks : numpy ndarray
numpy array containing nks counts
Returns:
pandas DataFrame
The index of the DataFrame contains the opinion words and the
columns represent the perspectives.
"""
# TODO: fix case when word not in topicDictionary
logger.debug('calculating contrastive opinions')
opinion_words = list(opinions[next(iter(opinions))].index)
result = []
for p, opinion in opinions.items():
c_opinion = opinion * topics.loc[query] * nks[-1]
c_opinion = np.sum(c_opinion, axis=1)
c_opinion /= np.sum(c_opinion)
result.append(pd.Series(c_opinion, index=opinion_words, name=p))
return | pd.concat(result, axis=1, keys=[s.name for s in result]) | pandas.concat |
from itertools import product as it_product
from typing import List, Dict
import numpy as np
import os
import pandas as pd
from scipy.stats import spearmanr, wilcoxon
from provided_code.constants_class import ModelParameters
from provided_code.data_loader import DataLoader
from provided_code.dose_evaluation_class import EvaluateDose
from provided_code.general_functions import get_paths, get_predictions_to_optimize
def consolidate_data_for_analysis(cs: ModelParameters, force_new_consolidate: bool = False) \
-> [pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""
Consolidated data of all reference plans, dose predictions, and KBP plans. This may take about an hour to run, but
only needs to be run once for a given set of experiments.
Args:
cs: A constants object.
force_new_consolidate: Flag that will force consolidating data, which will overwrite previous data that was
consolidated in previous iterations.
Returns:
df_dose_error: Summary of dose error
df_dvh_metrics: Summary of DVH metric performance (can be converted to DVH error later)
df_clinical_criteria: Summary of clinical criteria performance
df_ref_dvh_metrics: Summary of reference dose DVH metrics
df_ref_clinical_criteria: Summary of reference dose clinical criteria performance
df_objective_data: The data from the objective functions (e.g., weights, objective function values)
df_solve_time: The time it took to solve models
"""
# Run consolidate_data_for_analysis when new predictions or plans
consolidate_data_paths = {'dose': f'{cs.results_data_dir}/dose_error_df.csv',
'dvh': f'{cs.results_data_dir}/dvh_metric_df.csv',
'clinical_criteria': f'{cs.results_data_dir}/clinical_criteria_df.csv',
'ref_dvh': f'{cs.results_data_dir}/reference_metrics.csv',
'ref_clinical_criteria': f'{cs.results_data_dir}/reference_criteria.csv',
'weights': f'{cs.results_data_dir}/weights_df.csv',
'solve_time': f'{cs.results_data_dir}/solve_time_df.csv'
}
# Check if consolidated data already exists
no_consolidated_date = False
for p in consolidate_data_paths.values():
if not os.path.isfile(p):
print(p)
no_consolidated_date = True
os.makedirs(cs.results_data_dir, exist_ok=True) # Make dir for results
# Consolidate data if it doesn't exist yet or force flag is True
if no_consolidated_date or force_new_consolidate:
# Prepare strings for data that will be evaluated
predictions_to_optimize, prediction_names = get_predictions_to_optimize(cs)
patient_names = os.listdir(cs.reference_data_dir)
hold_out_plan_paths = get_paths(cs.reference_data_dir, ext='') # list of paths used for held out testing
# Evaluate dose metrics
patient_data_loader = DataLoader(hold_out_plan_paths, mode_name='evaluation') # Set data loader
dose_evaluator_sample = EvaluateDose(patient_data_loader)
# Make reference dose DVH metrics and clinical criteria
dose_evaluator_sample.make_metrics()
dose_evaluator_sample.melt_dvh_metrics('Reference', 'reference_dose_metric_df').to_csv(
consolidate_data_paths['ref_dvh'])
dose_evaluator_sample.melt_dvh_metrics('Reference', 'reference_criteria_df').to_csv(
consolidate_data_paths['ref_clinical_criteria'])
# Initialize DataFrames for all scores and errors
optimizer_names = os.listdir(cs.plans_dir) # Get names of all optimizers
dose_error_index_dict, dvh_metric_index_dict = make_error_and_metric_indices(patient_names,
dose_evaluator_sample,
optimizer_names)
df_dose_error_indices = pd.MultiIndex.from_product(**dose_error_index_dict)
df_dvh_error_indices = pd.MultiIndex.from_arrays(**dvh_metric_index_dict)
# Make DataFrames
df_dose_error = pd.DataFrame(columns=prediction_names, index=df_dose_error_indices)
df_solve_time = pd.DataFrame(columns=prediction_names, index=df_dose_error_indices)
df_dvh_metrics = pd.DataFrame(columns=prediction_names, index=df_dvh_error_indices)
df_clinical_criteria = pd.DataFrame(columns=prediction_names, index=df_dvh_error_indices)
weights_list = []
weight_columns = []
# Iterate through each prediction in the list of prediction_names
for prediction in prediction_names:
# Make a dataloader that loads predicted dose distributions
prediction_paths = get_paths(f'{cs.prediction_dir}/{prediction}', ext='csv')
prediction_dose_loader = DataLoader(prediction_paths, mode_name='predicted_dose') # Set prediction loader
# Evaluate predictions and plans with respect to ground truth
dose_evaluator = EvaluateDose(patient_data_loader, prediction_dose_loader)
populate_error_dfs(dose_evaluator, df_dose_error, df_dvh_metrics, df_clinical_criteria, prediction,
'Prediction')
# Make dataloader for plan dose distributions
for opt_name in optimizer_names:
print(opt_name)
# Get the paths of all optimized plans for prediction
cs.get_optimization_directories(prediction, opt_name)
weights_list, weight_columns = populate_weights_df(cs, weights_list)
populate_solve_time_df(cs, df_solve_time)
# Make data loader to load plan doses
plan_paths = get_paths(cs.plan_dose_from_pred_dir, ext='csv') # List of all plan dose paths
plan_dose_loader = DataLoader(plan_paths, mode_name='predicted_dose') # Set plan dose loader
plan_evaluator = EvaluateDose(patient_data_loader, plan_dose_loader) # Make evaluation object
# Ignore prediction name if no data exists, o/w populate DataFrames
if not patient_data_loader.file_paths_list:
print('No patient information was given to calculate metrics')
else:
# Evaluate prediction errors
populate_error_dfs(plan_evaluator, df_dose_error, df_dvh_metrics, df_clinical_criteria, prediction,
opt_name)
# Clean up weights
weights_df = pd.DataFrame(weights_list, columns=weight_columns)
weights_df.set_index(['Objective', 'Structure', 'Patients', 'Dose_type', 'Prediction'], inplace=True)
weights_df = weights_df.unstack('Prediction')
# Save dose and DVH error DataFrames
df_dose_error.to_csv(consolidate_data_paths['dose'])
df_dvh_metrics.to_csv(consolidate_data_paths['dvh'])
df_clinical_criteria.to_csv(consolidate_data_paths['clinical_criteria'])
weights_df.to_csv(consolidate_data_paths['weights'])
df_solve_time.to_csv(consolidate_data_paths['solve_time'])
# Loads the DataFrames that contain consolidated data
df_dose_error = pd.read_csv(consolidate_data_paths['dose'], index_col=[0, 1])
df_dvh_metrics = pd.read_csv(consolidate_data_paths['dvh'], index_col=[0, 1, 2, 3])
df_clinical_criteria = pd.read_csv(consolidate_data_paths['clinical_criteria'], index_col=[0, 1, 2, 3])
df_ref_dvh_metrics = pd.read_csv(consolidate_data_paths['ref_dvh'], index_col=[0, 1, 2, 3], squeeze=True)
df_ref_dvh_metrics.index.set_names(df_dvh_metrics.index.names, inplace=True)
df_ref_clinical_criteria = pd.read_csv(consolidate_data_paths['ref_clinical_criteria'], index_col=[0, 1, 2, 3],
squeeze=True)
df_ref_clinical_criteria.index.set_names(df_clinical_criteria.index.names, inplace=True)
df_objective_data = pd.read_csv(consolidate_data_paths['weights'], index_col=[0, 1, 2, 3], header=[0, 1])
df_solve_time = pd.read_csv(consolidate_data_paths['solve_time'], index_col=[0, 1]).drop('Prediction', axis=0,
level=0)
# Adjust DVH metric signs to reflect direction of "better"
df_dvh_metrics.loc[:, :, ['D_95', 'D_99'], :] *= -1
df_clinical_criteria.loc[:, :, ['D_95', 'D_99'], :] *= -1
df_ref_dvh_metrics.loc[:, :, ['D_95', 'D_99'], :] *= -1
df_ref_clinical_criteria.loc[:, :, ['D_95', 'D_99'], :] *= -1
return df_dose_error, df_dvh_metrics, df_clinical_criteria, df_ref_dvh_metrics, df_ref_clinical_criteria, df_objective_data, df_solve_time
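# Example usage (a sketch; constructing ModelParameters is project specific and
# not shown here):
# cs = ModelParameters()
# (df_dose_error, df_dvh_metrics, df_clinical_criteria, df_ref_dvh_metrics,
#  df_ref_clinical_criteria, df_objective_data, df_solve_time) = \
#     consolidate_data_for_analysis(cs, force_new_consolidate=False)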
def make_error_and_metric_indices(patient_names: List[str], dose_evaluator_sample: EvaluateDose, optimizers: List[str]) \
-> [Dict, Dict]:
"""
Initialize the data frame indices for the dose error and DVH metric DataFrames
Args:
patient_names: list of patient names/identifiers
dose_evaluator_sample: A sample of the dose evaluator object that will be used during the processing stage
optimizers: list of optimizer names
Returns:
dose_error_dict: Dictionaries with stored indices (dose type, patients) for dose error
dvh_metric_dict: Dictionaries with stored indices (dose types, patients) for DVH metrics
"""
iterables = [['Prediction', *optimizers], patient_names, dose_evaluator_sample.metric_difference_df.columns]
iterables_with_tuple = list(it_product(*iterables))
iterables_new = []
for i in iterables_with_tuple:
iterables_new.append((i[0], i[1], i[2][0], i[2][1]))
dose_error_indices = [iterables[0], iterables[1]]
dvh_metric_indices = list(zip(*iterables_new))
# Set names
dose_error_dict = {'iterables': dose_error_indices, 'names': ["Dose_type", "Patients"]}
dvh_metric_dict = {'arrays': dvh_metric_indices, 'names': ["Dose_type", "Patients", "Metric", "Structure"]}
return dose_error_dict, dvh_metric_dict
def populate_error_dfs(evaluator: EvaluateDose, df_dose_error: pd.DataFrame, df_dvh_metrics: pd.DataFrame,
df_clinical_criteria: pd.DataFrame, prediction_name: str, dose_type: str):
"""
Populates the DataFrames that summarize
Args:
evaluator: An EvaluateDose Object that will be summarized
df_dose_error: The DataFrame that contains dose errors
df_dvh_metrics: The DataFrame that contains DVH metrics
df_clinical_criteria: The DataFrame that contains clinical criteria performance
prediction_name: The name of the prediction model
dose_type: The type of dose (e.g., reference, prediction, optimization model)
"""
# Evaluate prediction errors
evaluator.make_metrics()
# Save collection of dose errors
dose_indices = evaluator.dose_score_vec.index
df_dose_error.loc[(dose_type, dose_indices), prediction_name] = evaluator.dose_score_vec[dose_indices].values
# Populate the DVH errors
evaluated_dvh_metrics = evaluator.melt_dvh_metrics(dose_type)
df_dvh_metrics.loc[evaluated_dvh_metrics.index, prediction_name] = evaluated_dvh_metrics.values
# Populate clinical criteria metrics
evaluated_clinical_criteria = evaluator.melt_dvh_metrics(dose_type, dose_metrics_att='new_criteria_metric_df')
df_clinical_criteria.loc[evaluated_clinical_criteria.index, prediction_name] = evaluated_clinical_criteria.values
def populate_weights_df(cs: ModelParameters, weights_list) -> [List, List]:
"""
Populated a list (weights_list) with data related to cost function (e.g., structure, objective function values)
Args:
cs: Constant object
weights_list: List of weights that will be populated
Returns:
weights_list: List of populated weights
weights_list_column_headers: Column headers for list
"""
# Initialize information for plan weights
plan_weights_paths = get_paths(cs.plan_weights_from_pred_dir, ext='csv')
plan_weights_loader = DataLoader(plan_weights_paths, mode_name='plan_weights')
weights_list_column_headers = []
# Load weight info for each patient
for batch_idx in range(plan_weights_loader.number_of_batches()):
data_batch = plan_weights_loader.get_batch(batch_idx)
pt_id = data_batch['patient_list'][0]
plan_weights = data_batch['plan_weights'][0]
# Separate objective function from structure
roi_criteria_pairs = plan_weights.apply(lambda x: pd.Series(x['Objective'].split(' ', 1)), axis=1)
plan_weights['Structure'] = roi_criteria_pairs[0]
plan_weights['Objective'] = roi_criteria_pairs[1]
# Adjust plan weights DataFrame with plan/patient data
plan_weights['Patients'] = pt_id
plan_weights['Dose_type'] = cs.opt_name
plan_weights['Prediction'] = cs.prediction_name
# Extend weight data to weight list
weights_list.extend(plan_weights.values.tolist())
weights_list_column_headers = plan_weights.columns.to_list()
return weights_list, weights_list_column_headers
def populate_solve_time_df(cs: ModelParameters, df_solve_time: pd.DataFrame):
"""
Populated a DataFrame (df_solve_time) with data related to solve time and plan (optimization) gap
Args:
cs: Constants object
df_solve_time: DataFrame with solve time information
"""
# Initialize plan gap/solve time information
plan_gap_paths = get_paths(cs.plan_gap_from_pred_dir, ext='csv')
plan_gap_loader = DataLoader(plan_gap_paths, mode_name='plan_gap')
# Load solve time/gap for each patient
for batch_idx in range(plan_gap_loader.number_of_batches()):
data_batch = plan_gap_loader.get_batch(batch_idx)
pt_id = data_batch['patient_list'][0]
plan_gap = data_batch['plan_gap'][0]
# Populate summary dataframe with time/gap info
df_solve_time.loc[(cs.opt_name, pt_id), cs.prediction_name] = plan_gap['solve time']
def summarize_scores(cs: ModelParameters, df_errors: pd.DataFrame, name: str, level=0) -> pd.DataFrame:
"""
Args:
cs: Model constants
df_errors: DataFrame of errors that can be converted into a score by taking average for every prediction/opt
name: Name of score that will be generated
level: Level of df_errors that average is calculated over
Returns:
ranked_scores:
"""
# Calculate scores
score = round(df_errors.mean(axis=0, level=level), 3)
score = score.loc[cs.optimization_short_hands_dict.keys()]
rank = score.rank(axis=1)
# Set order based on prediction rank
sorted_scores = score.sort_values(by='Prediction', axis=1).columns
# Rename index prior to concatenating the data
score.index = score.index.map(lambda x: f'{x} {name.lower()} score')
rank.index = rank.index.map(lambda x: f'{x} {name.lower()} rank')
# Concat scores and rank, ordered based on prediction rank
ranked_scores = | pd.concat((score[sorted_scores], rank[sorted_scores])) | pandas.concat |
#!/usr/bin/env python3
# #############################################################################
# Sylvain @ GIS / Biopolis / Singapore
# <NAME> <<EMAIL>>
# Started on 2019-12-17
# Reads Binning Project
#
# #############################################################################
#
#
# About reports from Kraken2
#
import os
import os.path as osp
# import ete3.ncbi_taxonomy
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.metrics import auc
from plot_me.bio import ncbi, get_list_rank
from plot_me.tools import PATHS
pd.set_option('precision', 5)
# ncbi = ete3.ncbi_taxonomy.NCBITaxa()
class Report:
kraken_cols = ["per_clade_covered", "reads_clade_covered", "reads_direct_taxo", "rank", "taxon", "scientific_name"]
line_style = ['-.', '--', ':']
obj_counter = 0
def __init__(self, title, folder, nb_reads=None):
self.obj_id = Report.obj_counter
Report.obj_counter += 1
self.title = title
self.folder = folder
self.nb_reads = nb_reads
self.nb_assigned = None
self.report = None
self.all_reports = None
self.thresholds = None
self.recall = None
self.precision = None
self.auc = None
def load_full(self, filename):
"""Load and filter to species only"""
self.report = pd.read_csv(osp.join(self.folder, filename), sep="\t", names=self.kraken_cols)
self.nb_reads = self.report[self.report.taxon <= 1].reads_clade_covered.sum()
self.report = self.report[self.report["rank"] == "S"][["reads_clade_covered", "taxon"]].groupby(["taxon"]).sum()
# self.report["cluster"] = "full"
self.report.rename(columns={"reads_clade_covered": "full_DB"}, inplace=True)
self.assigned_reads()
def load_multi(self, list_files):
""" Reports should have an identifier <.bin-x.> to identify them """
reports_bins = []
bins = []
list_total_nb_reads = []
for file_name in list_files:
tmp_df = pd.read_csv(osp.join(self.folder, file_name), sep="\t", names=self.kraken_cols)
list_total_nb_reads.append(tmp_df[tmp_df.taxon <= 1].reads_clade_covered.sum())
tmp_df["cluster"] = file_name.split(".bin-")[1].split(".")[0]
reports_bins.append(tmp_df)
print(self.title, list_total_nb_reads)
self.nb_reads = sum(list_total_nb_reads)
report_bins = pd.concat(reports_bins, ignore_index=True)
report_selected = report_bins[report_bins["rank"] == "S"][["reads_clade_covered", "taxon", "cluster"]]
# todo: losing the cluster provenance by summing everything :/
aggregated = report_selected.groupby(["taxon"]).sum()
aggregated.rename(columns={"reads_clade_covered": self.title}, inplace=True)
# aggregated.sort_values("reads_clade_covered", ascending=False, inplace=True)
self.report = aggregated
self.assigned_reads()
def load_gt(self, file_path):
gt_tmp = pd.read_pickle(file_path)
gt_counting = pd.DataFrame(gt_tmp.taxon.value_counts())
gt_counting.rename(columns={"taxon": "ground_truth"}, inplace=True)
gt_counting["taxon"] = get_list_rank(gt_counting.index)
self.report = gt_counting.groupby(["taxon"]).sum()
self.assigned_reads()
# self.report["cluster"] = "gt"
def assigned_reads(self):
self.nb_assigned = int(self.report.iloc[:, 0].sum())
def normalize(self):
self.report.iloc[:, 0] /= self.nb_assigned
def prec_recall(self, gt_species):
""" Get a set containing the species present. change to ratio instead of absolute number after working on the report """
# print(self.title)
# Floor the numbers by multiplying by 'rounding', then floor with numpy, then dividing again.
rounding = 10 ** 5
thresholds = ((self.report.iloc[:, 0] * rounding).apply(np.floor) / rounding).unique()
thresholds.sort()
data = []
for i, threshold in enumerate(thresholds):
found = set(self.report[self.report.iloc[:, 0] >= threshold].index)
tp = len(set.intersection(found, gt_species))
fn = len(gt_species) - tp
fp = len(found) - tp
data.append((threshold, tp, fn, fp,))
df_auc = pd.DataFrame(data, columns=["threshold", "tp", "fn", "fp"])
df_auc["recall"] = df_auc.tp / (df_auc.tp + df_auc.fn)
df_auc["precision"] = df_auc.tp / (df_auc.tp + df_auc.fp)
df_auc[["recall", "precision"]] = df_auc[["recall", "precision"]].fillna(0)
# Extend the last precision to 0 recall, as we don't have abundance threshold down to 0%
df_auc.loc[df_auc.index.max() + 1] = df_auc.iloc[-1]
df_auc.loc[df_auc.index[-1], "recall"] = 0
self.df_auc = df_auc
self.thresholds = thresholds
self.recall = df_auc["recall"].tolist()
self.precision = df_auc["precision"].tolist()
self.auc = auc(self.recall, self.precision)
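# The precision-recall curve above is swept over abundance thresholds: for each
# threshold, species whose (normalized) read share is >= threshold count as
# "found", tp/fn/fp are taken against gt_species, and sklearn's auc() applies
# the trapezoidal rule to the resulting (recall, precision) points.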
def plot_pr(self, nb=5, total=10, string_gt=""):
# todo: thicker line, dotted line
ratio = (total - nb) / total
label = f"auc={self.auc:.3f}, ({self.nb_assigned}/{self.nb_reads}) : {self.title}"
plt.plot(self.recall, self.precision, # alpha=0.7,
linewidth=2 + 3 * ratio, linestyle=self.line_style[self.obj_id % len(self.line_style)],
marker='+', markersize=10 + 4 * ratio, markeredgewidth=1 + 2 * ratio,
label=label) # Change line style so see them despite overlaying each other
self.legend = label
def __repr__(self):
return f"Report from {self.title} DB classification, {self.folder}"
class ReportsAnalysis:
def __init__(self, folder, string_full, string_bins, path_ground_truth):
""" string* are matching string to find the full and bin reports """
self.folder = folder
self.string_full = string_full
self.string_bins = string_bins
self.path_ground_truth = path_ground_truth
self.path_report_full = ""
self.path_report_bins = ""
self.selected_r = None
self.gt = None
self.gt_stats = None
self.reports = {}
self.nb_reads = None
self.gt_species = None
self.auc = None
self._recall = {}
self._precision = {}
@property
def report(self):
if self.selected_r is None:
self.selected_r = 0
if self.selected_r in self.reports.keys():
return self.reports[self.selected_r]
else:
return None
@property
def recall(self):
if self.selected_r not in self._recall.keys():
return None
else:
return self._recall[self.selected_r]
@property
def precision(self):
if self.selected_r not in self._precision.keys():
return None
else:
return self._precision[self.selected_r]
def load_gt(self):
        self.gt = pd.read_pickle(self.path_ground_truth)
# Copyright (c) 2020, NVIDIA CORPORATION.
import operator
import re
from string import ascii_letters, digits
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.tests.utils import (
DATETIME_TYPES,
NUMERIC_TYPES,
TIMEDELTA_TYPES,
assert_eq,
assert_exceptions_equal,
)
def _series_na_data():
return [
pd.Series([0, 1, 2, np.nan, 4, None, 6]),
pd.Series(
[0, 1, 2, np.nan, 4, None, 6],
index=["q", "w", "e", "r", "t", "y", "u"],
name="a",
),
pd.Series([0, 1, 2, 3, 4]),
pd.Series(["a", "b", "u", "h", "d"]),
pd.Series([None, None, np.nan, None, np.inf, -np.inf]),
pd.Series([]),
pd.Series(
[pd.NaT, pd.Timestamp("1939-05-27"), pd.Timestamp("1940-04-25")]
),
pd.Series([np.nan]),
pd.Series([None]),
pd.Series(["a", "b", "", "c", None, "e"]),
]
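# Illustrative round-trip sketch (not part of the original test suite): each NA-containing
# Series above should keep its length, and at least as many nulls, when copied into cudf.
# Kept deliberately loose so it does not depend on NaN-vs-null conversion details.
@pytest.mark.parametrize("ps", _series_na_data())
def test_series_na_roundtrip_sketch(ps):
    gs = cudf.from_pandas(ps)
    assert len(gs) == len(ps)
    assert int(gs.isnull().sum()) >= int(ps.isnull().sum())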
@pytest.mark.parametrize(
"data",
[
{"a": 1, "b": 2, "c": 24, "d": 1010},
{"a": 1},
{1: "a", 2: "b", 24: "c", 1010: "d"},
{1: "a"},
],
)
def test_series_init_dict(data):
pandas_series = pd.Series(data)
cudf_series = cudf.Series(data)
assert_eq(pandas_series, cudf_series)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3],
"b": [2, 3, 5],
"c": [24, 12212, 22233],
"d": [1010, 101010, 1111],
},
{"a": [1]},
],
)
def test_series_init_dict_lists(data):
assert_eq(pd.Series(data), cudf.Series(data))
@pytest.mark.parametrize(
"data",
[
[1, 2, 3, 4],
[1.0, 12.221, 12.34, 13.324, 324.3242],
[-10, -1111, 100, 11, 133],
],
)
@pytest.mark.parametrize(
"others",
[
[10, 11, 12, 13],
[0.1, 0.002, 324.2332, 0.2342],
[-10, -1111, 100, 11, 133],
],
)
@pytest.mark.parametrize("ignore_index", [True, False])
def test_series_append_basic(data, others, ignore_index):
psr = pd.Series(data)
gsr = cudf.Series(data)
other_ps = pd.Series(others)
other_gs = cudf.Series(others)
expected = psr.append(other_ps, ignore_index=ignore_index)
actual = gsr.append(other_gs, ignore_index=ignore_index)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
[
"abc",
"def",
"this is a string",
"this is another string",
"a",
"b",
"c",
],
["a"],
],
)
@pytest.mark.parametrize(
"others",
[
[
"abc",
"def",
"this is a string",
"this is another string",
"a",
"b",
"c",
],
["a"],
["1", "2", "3", "4", "5"],
["+", "-", "!", "_", "="],
],
)
@pytest.mark.parametrize("ignore_index", [True, False])
def test_series_append_basic_str(data, others, ignore_index):
    psr = pd.Series(data)
# Copyright (C) 2018 GuQiangJs. https://github.com/GuQiangJS
# Licensed under Apache License 2.0 <see LICENSE file>
import datetime
import unittest
import numpy as np
import pandas as pd
from finance_datareader_py.sina import SinaQuoteReader
from finance_datareader_py.sina import get_cpi
from finance_datareader_py.sina import get_dividends
from finance_datareader_py.sina import get_gold_and_foreign_exchange_reserves
from finance_datareader_py.sina import get_measure_of_money_supply
from finance_datareader_py.sina import get_ppi
from finance_datareader_py.sina import get_required_reserve_ratio
class sina_TestCase(unittest.TestCase):
def test_get_dividends(self):
df1, df2 = get_dividends('000541')
self.assertIsNotNone(df1)
self.assertFalse(df1.empty)
self.assertIsNotNone(df2)
self.assertFalse(df2.empty)
print(df1)
print('------------')
print(df2)
dt = datetime.date(2018, 5, 5)
df1 = df1.loc[df1['公告日期'] == dt]
self.assertEqual(np.float64(3.29), df1.at[0, '派息(税前)(元)'])
self.assertTrue(pd.isna(df1.at[0, '红股上市日']))
self.assertEqual(pd.Timestamp(2018, 5, 10), df1.at[0, '股权登记日'])
self.assertEqual(np.float64(1), df1.at[0, '转增(股)'])
self.assertEqual(np.float64(0), df1.at[0, '送股(股)'])
self.assertEqual(pd.Timestamp(2018, 5, 11), df1.at[0, '除权除息日'])
dt = datetime.date(1994, 12, 24)
df2 = df2.loc[df2['公告日期'] == dt]
self.assertEqual(np.float64(2), df2.at[0, '配股方案(每10股配股股数)'])
self.assertEqual(np.float64(8), df2.at[0, '配股价格(元)'])
self.assertEqual(np.float64(115755000), df2.at[0, '基准股本(万股)'])
self.assertEqual(pd.Timestamp(1995, 1, 4), df2.at[0, '除权日'])
self.assertEqual(pd.Timestamp(1995, 1, 3), df2.at[0, '股权登记日'])
        self.assertEqual(pd.Timestamp(1995, 1, 16)
# coding: utf-8
# ---
#
# _You are currently looking at **version 1.2** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-social-network-analysis/resources/yPcBs) course resource._
#
# ---
# # Assignment 4
# In[3]:
import networkx as nx
import pandas as pd
import numpy as np
import pickle
# ---
#
# ## Part 1 - Random Graph Identification
#
# For the first part of this assignment you will analyze randomly generated graphs and determine which algorithm created them.
# In[4]:
P1_Graphs = pickle.load(open('A4_graphs','rb'))
P1_Graphs
# <br>
# `P1_Graphs` is a list containing 5 networkx graphs. Each of these graphs were generated by one of three possible algorithms:
# * Preferential Attachment (`'PA'`)
# * Small World with low probability of rewiring (`'SW_L'`)
# * Small World with high probability of rewiring (`'SW_H'`)
#
# Analyze each of the 5 graphs and determine which of the three algorithms generated the graph.
#
# *The `graph_identification` function should return a list of length 5 where each element in the list is either `'PA'`, `'SW_L'`, or `'SW_H'`.*
# In[5]:
def degree_distribution(G):
degrees = G.degree()
degree_values = sorted(set(degrees.values()))
histogram = [list(degrees.values()).count(i) / float(nx.number_of_nodes(G))
for i in degree_values]
return histogram
# In[6]:
degree_distribution(P1_Graphs[2])
# In[7]:
nx.average_clustering(P1_Graphs[1])
# In[9]:
nx.average_shortest_path_length(P1_Graphs[1])
# In[10]:
for G in P1_Graphs:
print(nx.average_clustering(G), nx.average_shortest_path_length(G),
len(degree_distribution(G)))
# In[11]:
def graph_identification():
methods = []
for G in P1_Graphs:
clustering = nx.average_clustering(G)
shortest_path = nx.average_shortest_path_length(G)
degree_hist = degree_distribution(G)
if len(degree_hist) > 10:
methods.append('PA')
elif clustering < 0.1:
methods.append('SW_H')
else:
methods.append('SW_L')
return methods
# In[12]:
graph_identification()
# ---
#
# ## Part 2 - Company Emails
#
# For the second part of this assignment you will be working with a company's email network where each node corresponds to a person at the company, and each edge indicates that at least one email has been sent between two people.
#
# The network also contains the node attributes `Department` and `ManagementSalary`.
#
# `Department` indicates the department in the company which the person belongs to, and `ManagementSalary` indicates whether that person is receiving a management position salary.
# In[13]:
G = nx.read_gpickle('email_prediction.txt')
print(nx.info(G))
# In[14]:
G.nodes(data = True)[:10]
# ### Part 2A - Salary Prediction
#
# Using network `G`, identify the people in the network with missing values for the node attribute `ManagementSalary` and predict whether or not these individuals are receiving a management position salary.
#
# To accomplish this, you will need to create a matrix of node features using networkx, train a sklearn classifier on nodes that have `ManagementSalary` data, and predict a probability of the node receiving a management salary for nodes where `ManagementSalary` is missing.
#
#
#
# Your predictions will need to be given as the probability that the corresponding employee is receiving a management position salary.
#
# The evaluation metric for this assignment is the Area Under the ROC Curve (AUC).
#
#     Your grade will be based on the AUC score computed for your classifier. A model with an AUC of 0.88 or higher will receive full points, and one with an AUC of 0.82 or higher will pass (get 80% of the full points).
#
# Using your trained classifier, return a series of length 252 with the data being the probability of receiving management salary, and the index being the node id.
#
# Example:
#
# 1 1.0
# 2 0.0
# 5 0.8
# 8 1.0
# ...
# 996 0.7
# 1000 0.5
# 1001 0.0
# Length: 252, dtype: float64
# In[15]:
G.nodes(data = True)[0][1]
# In[16]:
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
def salary_predictions():
def is_management(node):
managementSalary = node[1]['ManagementSalary']
if managementSalary == 0:
return 0
elif managementSalary == 1:
return 1
else:
return None
df = pd.DataFrame(index=G.nodes())
df['clustering'] = pd.Series(nx.clustering(G))
df['degree'] = pd.Series(G.degree())
df['degree_centrality'] = pd.Series(nx.degree_centrality(G))
df['closeness'] = pd.Series(nx.closeness_centrality(G, normalized=True))
df['betweeness'] = pd.Series(nx.betweenness_centrality(G, normalized=True))
df['pr'] = pd.Series(nx.pagerank(G))
df['is_management'] = pd.Series([is_management(node) for node in G.nodes(data=True)])
df_train = df[~pd.isnull(df['is_management'])]
df_test = df[pd.isnull(df['is_management'])]
features = ['clustering', 'degree', 'degree_centrality', 'closeness', 'betweeness', 'pr']
X_train = df_train[features]
Y_train = df_train['is_management']
X_test = df_test[features]
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
clf = MLPClassifier(hidden_layer_sizes = [10, 5], alpha = 5,
random_state = 0, solver='lbfgs', verbose=0)
clf.fit(X_train_scaled, Y_train)
test_proba = clf.predict_proba(X_test_scaled)[:, 1]
return pd.Series(test_proba,X_test.index)
# prediction = salary_predictions()
# In[17]:
salary_predictions()
# ### Part 2B - New Connections Prediction
#
# For the last part of this assignment, you will predict future connections between employees of the network. The future connections information has been loaded into the variable `future_connections`. The index is a tuple indicating a pair of nodes that currently do not have a connection, and the `Future Connection` column indicates if an edge between those two nodes will exist in the future, where a value of 1.0 indicates a future connection.
# In[18]:
future_connections = pd.read_csv('Future_Connections.csv', index_col=0, converters={0: eval})
future_connections.head(10)
# Using network `G` and `future_connections`, identify the edges in `future_connections` with missing values and predict whether or not these edges will have a future connection.
#
# To accomplish this, you will need to create a matrix of features for the edges found in `future_connections` using networkx, train a sklearn classifier on those edges in `future_connections` that have `Future Connection` data, and predict a probability of the edge being a future connection for those edges in `future_connections` where `Future Connection` is missing.
#
#
#
# Your predictions will need to be given as the probability of the corresponding edge being a future connection.
#
# The evaluation metric for this assignment is the Area Under the ROC Curve (AUC).
#
#     Your grade will be based on the AUC score computed for your classifier. A model with an AUC of 0.88 or higher will receive full points, and one with an AUC of 0.82 or higher will pass (get 80% of the full points).
#
# Using your trained classifier, return a series of length 122112 with the data being the probability of the edge being a future connection, and the index being the edge as represented by a tuple of nodes.
#
# Example:
#
# (107, 348) 0.35
# (542, 751) 0.40
# (20, 426) 0.55
# (50, 989) 0.35
# ...
# (939, 940) 0.15
# (555, 905) 0.35
# (75, 101) 0.65
# Length: 122112, dtype: float64
# In[19]:
future_connections.head()
# In[20]:
G.node[1]
# In[22]:
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
def new_connections_predictions():
for node in G.nodes():
G.node[node]['community'] = G.node[node]['Department']
preferential_attachment = list(nx.preferential_attachment(G))
df = pd.DataFrame(index=[(x[0], x[1]) for x in preferential_attachment])
df['preferential_attachment'] = [x[2] for x in preferential_attachment]
cn_soundarajan_hopcroft = list(nx.cn_soundarajan_hopcroft(G))
df_cn_soundarajan_hopcroft = pd.DataFrame(index=[(x[0], x[1]) for x in cn_soundarajan_hopcroft])
df_cn_soundarajan_hopcroft['cn_soundarajan_hopcroft'] = [x[2] for x in cn_soundarajan_hopcroft]
df = df.join(df_cn_soundarajan_hopcroft,how='outer')
df['cn_soundarajan_hopcroft'] = df['cn_soundarajan_hopcroft'].fillna(value=0)
df['resource_allocation_index'] = [x[2] for x in list(nx.resource_allocation_index(G))]
df['jaccard_coefficient'] = [x[2] for x in list(nx.jaccard_coefficient(G))]
df = future_connections.join(df,how='outer')
df_train = df[~pd.isnull(df['Future Connection'])]
df_test = df[pd.isnull(df['Future Connection'])]
features = ['cn_soundarajan_hopcroft', 'preferential_attachment', 'resource_allocation_index', 'jaccard_coefficient']
X_train = df_train[features]
Y_train = df_train['Future Connection']
X_test = df_test[features]
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
clf = MLPClassifier(hidden_layer_sizes = [10, 5], alpha = 5,
random_state = 0, solver='lbfgs', verbose=0)
clf.fit(X_train_scaled, Y_train)
test_proba = clf.predict_proba(X_test_scaled)[:, 1]
    predictions = pd.Series(test_proba, X_test.index)
import requests
import pandas as pd
import numpy as np
import configparser
from datetime import timedelta, datetime
from dateutil import relativedelta, parser, rrule
from dateutil.rrule import WEEKLY
class whoop_login:
'''A class object to allow a user to login and store their authorization code,
then perform pulls using the code in order to access different types of data'''
def __init__(self, auth_code=None, whoop_id=None,current_datetime=datetime.utcnow()):
self.auth_code=auth_code
self.whoop_id=whoop_id
self.current_datetime=current_datetime
self.start_datetime=None
self.all_data=None
self.all_activities=None
self.sport_dict=None
self.all_sleep=None
self.all_sleep_events=None
def pull_api(self, url,df=False):
auth_code=self.auth_code
headers={'authorization':auth_code}
pull=requests.get(url,headers=headers)
if pull.status_code==200 and len(pull.content)>1:
if df:
d=pd.json_normalize(pull.json())
return d
else:
return pull.json()
else:
return "no response"
def pull_sleep_main(self,sleep_id):
athlete_id=self.whoop_id
sleep=self.pull_api('https://api-7.whoop.com/users/{}/sleeps/{}'.format(athlete_id,sleep_id))
main_df=pd.json_normalize(sleep)
return main_df
def pull_sleep_events(self,sleep_id):
athlete_id=self.whoop_id
sleep=self.pull_api('https://api-7.whoop.com/users/{}/sleeps/{}'.format(athlete_id,sleep_id))
events_df=pd.json_normalize(sleep['events'])
events_df['id']=sleep_id
return events_df
def get_authorization(self,user_ini):
'''
Function to get the authorization token and user id.
This must be completed before a user can query the api
'''
config=configparser.ConfigParser()
config.read(user_ini)
username=config['whoop']['username']
password=config['whoop']['password']
headers={
"username": username,
"password": password,
"grant_type": "password",
"issueRefresh": False}
auth = requests.post("https://api-7.whoop.com/oauth/token", json=headers)
if auth.status_code==200:
content=auth.json()
user_id=content['user']['id']
token=content['access_token']
start_time=content['user']['profile']['createdAt']
self.whoop_id=user_id
self.auth_code='bearer ' + token
self.start_datetime=start_time
print("Authentication successful")
else:
print("Authentication failed - please double check your credentials")
def get_keydata_all(self):
'''
This function returns a dataframe of WHOOP metrics for each day of WHOOP membership.
In the resulting dataframe, each day is a row and contains strain, recovery, and sleep information
'''
if self.start_datetime:
if self.all_data is not None:
## All data already pulled
return self.all_data
else:
start_date=parser.isoparse(self.start_datetime).replace(tzinfo=None)
end_time='T23:59:59.999Z'
start_time='T00:00:00.000Z'
intervals=rrule.rrule(freq=WEEKLY,interval=1,until=self.current_datetime, dtstart=start_date)
date_range=[[d.strftime('%Y-%m-%d') + start_time,
(d+relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d') + end_time] for d in intervals]
all_data=pd.DataFrame()
for dates in date_range:
cycle_url='https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(self.whoop_id,
dates[1],
dates[0])
data=self.pull_api(cycle_url,df=True)
all_data=pd.concat([all_data,data])
all_data.reset_index(drop=True,inplace=True)
## fixing the day column so it's not a list
all_data['days']=all_data['days'].map(lambda d: d[0])
all_data.rename(columns={"days":'day'},inplace=True)
## Putting all time into minutes instead of milliseconds
sleep_cols=['qualityDuration','needBreakdown.baseline','needBreakdown.debt','needBreakdown.naps',
'needBreakdown.strain','needBreakdown.total']
for sleep_col in sleep_cols:
all_data['sleep.' + sleep_col]=all_data['sleep.' + sleep_col].astype(float).apply(lambda x: np.nan if np.isnan(x) else x/60000)
## Making nap variable
all_data['nap_duration']=all_data['sleep.naps'].apply(lambda x: x[0]['qualityDuration']/60000 if len(x)==1 else(
sum([y['qualityDuration'] for y in x if y['qualityDuration'] is not None])/60000 if len(x)>1 else 0))
all_data.drop(['sleep.naps'],axis=1,inplace=True)
                ## drop duplicates (subset on specific columns because some columns contain lists and are unhashable)
all_data.drop_duplicates(subset=['day','sleep.id'],inplace=True)
self.all_data=all_data
return all_data
else:
print("Please run the authorization function first")
def get_activities_all(self):
'''
        Activity data is pulled through the get_keydata functions. If that pull has already
        happened, this function simply reshapes the activity column into a dataframe of
        activities, one activity per row; otherwise it runs the key-data pull first and then
        returns the activity dataframe.'''
if self.sport_dict:
sport_dict=self.sport_dict
else:
sports=self.pull_api('https://api-7.whoop.com/sports')
sport_dict={sport['id']:sport['name'] for sport in sports}
            self.sport_dict = sport_dict
if self.start_datetime:
## process activity data
if self.all_data is not None:
## use existing
data=self.all_data
else:
## pull all data to process activities
data=self.get_keydata_all()
## now process activities data
act_data=pd.json_normalize(data[data['strain.workouts'].apply(len)>0]['strain.workouts'].apply(lambda x: x[0]))
act_data[['during.upper','during.lower']]=act_data[['during.upper','during.lower']].apply(pd.to_datetime)
act_data['total_minutes']=act_data.apply(lambda x: (x['during.upper']-x['during.lower']).total_seconds()/60.0,axis=1)
for z in range(0,6):
act_data['zone{}_minutes'.format(z+1)]=act_data['zones'].apply(lambda x: x[z]/60000.)
act_data['sport_name']=act_data.sportId.apply(lambda x: sport_dict[x])
act_data['day']=act_data['during.lower'].dt.strftime('%Y-%m-%d')
act_data.drop(['zones','during.bounds'],axis=1,inplace=True)
act_data.drop_duplicates(inplace=True)
self.all_activities=act_data
return act_data
else:
print("Please run the authorization function first")
def get_sleep_all(self):
'''
This function returns all sleep metrics in a data frame, for the duration of user's WHOOP membership.
Each row in the data frame represents one night of sleep
'''
if self.auth_code:
if self.all_data is not None:
## use existing
data=self.all_data
else:
## pull timeframe data
data=self.get_keydata_all()
## getting all the sleep ids
if self.all_sleep is not None:
## All sleep data already pulled
return self.all_sleep
else:
sleep_ids=data['sleep.id'].values.tolist()
sleep_list=[int(x) for x in sleep_ids if pd.isna(x)==False]
                all_sleep = pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2021. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""main function to convert user scripts"""
import os
import pandas as pd
import util_global
from conver_by_ast import conver_ast
from file_op import mkdir
from file_op import mkdir_and_copyfile
from file_op import write_report_terminator
from file_op import abs_join
from file_op import get_api_statistic
from file_op import adjust_index
from util import check_path_length
from util import log_warning
def conver():
"""The entry point to convert Tensorflow script"""
print("Begin conver, input file: " + util_global.get_value('input') + '\n')
out_path = util_global.get_value('output')
dst_path = os.path.split(util_global.get_value('input').rstrip('\\/'))[-1]
dst_path_new = dst_path + util_global.get_value('timestap')
conver_path = os.walk(util_global.get_value('input'))
report_dir = util_global.get_value('report')
mkdir(report_dir)
report_xlsx = os.path.join(report_dir, 'api_analysis_report.xlsx')
    util_global.set_value('generate_dir_report', pd.DataFrame())
# This script is designed to build your sheet
# MUST HAVE A SERVICE ACCOUNT SET UP IN ORDER FOR THIS TO WORK
# The service account '.json' must also be present
# This script assumes the LoginData.csv and Service account JSON are colated with the script
# Please note, as of v2.1.2 this will NOT generate the charts present in the example file
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import pandas as pd
login_data = pd.read_csv('LoginData.csv')
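# Illustrative authorization sketch (not part of the original script); the scope list and
# the 'service_account.json' filename are assumptions based on the comments above about a
# colocated service-account JSON:
# scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
# creds = ServiceAccountCredentials.from_json_keyfile_name('service_account.json', scope)
# client = gspread.authorize(creds)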
# -*- coding: utf-8 -*-
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
import pulp
pkg_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, pkg_path)
from VarianceConstraints import VarianceConstraints # noqa: E402
_test_link = r'https://web.stanford.edu/~hastie/CASI_files/DATA/'
def test_data(n_obs=None, n_vars=None):
try:
        data = pd.read_csv('leukemia_big.csv')
#!/usr/bin/env python
# coding: utf-8
# ## Explore one-hit vs. two-hit samples in expression space
# In[1]:
from pathlib import Path
import pickle as pkl
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import sys; sys.path.append('..')
import config as cfg
from data_utilities import load_cnv_data
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
# In[2]:
# park et al. geneset info
park_loss_data = cfg.data_dir / 'park_loss_df.tsv'
park_gain_data = cfg.data_dir / 'park_gain_df.tsv'
# park et al. significant gene info
park_loss_sig_data = cfg.data_dir / 'park_loss_df_sig_only.tsv'
park_gain_sig_data = cfg.data_dir / 'park_gain_df_sig_only.tsv'
# park et al. gene/cancer type predictions
park_preds_dir = cfg.data_dir / 'park_genes_all_preds'
# mutation and copy number data
pancancer_pickle = Path('/home/jake/research/mpmp/data/pancancer_data.pkl')
# gene expression/rppa data files
data_type = 'gene expression'
subset_feats = 10000
gene_expression_data_file = Path(
'/home/jake/research/mpmp/data/tcga_expression_matrix_processed.tsv.gz'
)
rppa_data_file = Path(
'/home/jake/research/mpmp/data/tcga_rppa_matrix_processed.tsv'
)
# ### Load mutation info
#
# For now, just use binary mutation status from the pancancer repo. In the future we could pull more granular info from MC3, but it would take some engineering of `1_get_mutation_counts` to do this for lots of genes.
# In[3]:
park_loss_df = pd.read_csv(park_loss_data, sep='\t', index_col=0)
park_loss_df.head()
# In[4]:
park_gain_df = pd.read_csv(park_gain_data, sep='\t', index_col=0)
park_gain_df.head()
# In[5]:
with open(pancancer_pickle, 'rb') as f:
pancancer_data = pkl.load(f)
# In[6]:
# get (binary) mutation data
# 1 = observed non-silent mutation in this gene for this sample, 0 otherwise
mutation_df = pancancer_data[1]
print(mutation_df.shape)
mutation_df.iloc[:5, :5]
# ### Load copy number info
#
# Get copy loss/gain info directly from GISTIC "thresholded" output. This should be the same as (or very similar to) what the Park et al. study uses.
# In[7]:
sample_freeze_df = pancancer_data[0]
copy_samples = set(sample_freeze_df.SAMPLE_BARCODE)
print(len(copy_samples))
# In[8]:
copy_loss_df, copy_gain_df = load_cnv_data(
cfg.data_dir / 'pancan_GISTIC_threshold.tsv',
copy_samples
)
print(copy_loss_df.shape)
copy_loss_df.iloc[:5, :5]
# In[9]:
print(copy_gain_df.shape)
copy_gain_df.iloc[:5, :5]
# In[10]:
sample_freeze_df.head()
# ### Load expression data
#
# We'll also standardize each feature, and subset to the top features by mean absolute deviation if `subset_feats` is set.
# In[11]:
if data_type == 'gene expression':
exp_df = pd.read_csv(gene_expression_data_file, sep='\t', index_col=0)
elif data_type == 'rppa':
    exp_df = pd.read_csv(rppa_data_file, sep='\t', index_col=0)
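# Sketch of the standardization/subsetting described above (an assumption-laden outline,
# not the notebook's own code): keep the top `subset_feats` columns by mean absolute
# deviation, then z-score each remaining feature.
# mad = exp_df.mad().sort_values(ascending=False)
# exp_df = exp_df[mad.index[:subset_feats]]
# exp_df.loc[:, :] = StandardScaler().fit_transform(exp_df)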
import argparse
import os
import random
import warnings
import numpy as np
import pandas as pd
import pyloudnorm as pyln
import soundfile as sf
from tqdm import tqdm
# Global parameters
# eps secures log and division
EPS = 1e-10
# max amplitude in sources and mixtures
MAX_AMP = 0.9
# In LibriSpeech all the sources are at 16K Hz
RATE = 16000
# We will randomize loudness between this range
MIN_LOUDNESS = -33
MAX_LOUDNESS = -25
# A random seed is used for reproducibility
random.seed(72)
# Command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--librispeech_dir', type=str, required=True,
help='Path to librispeech root directory')
parser.add_argument('--librispeech_md_dir', type=str, required=True,
help='Path to librispeech metadata directory')
parser.add_argument('--wham_dir', type=str, required=True,
help='Path to wham root directory')
parser.add_argument('--wham_md_dir', type=str, required=True,
help='Path to wham metadata directory')
parser.add_argument('--metadata_outdir', type=str, default=None,
help='Where librimix metadata files will be stored.')
parser.add_argument('--n_src', type=int, required=True,
help='Number of sources desired to create the mixture')
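# Example invocation (the script name and the paths below are placeholders):
# python create_librimix_metadata.py --librispeech_dir /data/LibriSpeech \
# --librispeech_md_dir /data/LibriSpeech/metadata --wham_dir /data/wham_noise \
# --wham_md_dir /data/wham_noise/metadata --n_src 2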
def main(args):
librispeech_dir = args.librispeech_dir
librispeech_md_dir = args.librispeech_md_dir
wham_dir = args.wham_dir
wham_md_dir = args.wham_md_dir
n_src = args.n_src
# Create Librimix metadata directory
md_dir = args.metadata_outdir
if md_dir is None:
root = os.path.dirname(librispeech_dir)
md_dir = os.path.join(root, f'LibriMix/metadata')
os.makedirs(md_dir, exist_ok=True)
create_librimix_metadata(librispeech_dir, librispeech_md_dir, wham_dir,
wham_md_dir, md_dir, n_src)
def create_librimix_metadata(librispeech_dir, librispeech_md_dir, wham_dir,
wham_md_dir, md_dir, n_src):
""" Generate LibriMix metadata according to LibriSpeech metadata """
# Dataset name
dataset = f'libri{n_src}mix'
# List metadata files in LibriSpeech
librispeech_md_files = os.listdir(librispeech_md_dir)
# List metadata files in wham_noise
wham_md_files = os.listdir(wham_md_dir)
# If you wish to ignore some metadata files add their name here
# Example : to_be_ignored = ['dev-other.csv']
to_be_ignored = []
check_already_generated(md_dir, dataset, to_be_ignored,
librispeech_md_files)
# Go through each metadata file and create metadata accordingly
for librispeech_md_file in librispeech_md_files:
if not librispeech_md_file.endswith('.csv'):
print(f"{librispeech_md_file} is not a csv file, continue.")
continue
# Get the name of the corresponding noise md file
try:
wham_md_file = [f for f in wham_md_files if
f.startswith(librispeech_md_file.split('-')[0])][0]
except IndexError:
print('Wham metadata are missing you can either generate the '
'missing wham files or add the librispeech metadata to '
'to_be_ignored list')
break
# Open .csv files from LibriSpeech
librispeech_md = pd.read_csv(os.path.join(
librispeech_md_dir, librispeech_md_file), engine='python')
# Open .csv files from wham_noise
wham_md = pd.read_csv(os.path.join(
wham_md_dir, wham_md_file), engine='python')
# Filenames
save_path = os.path.join(md_dir,
'_'.join([dataset, librispeech_md_file]))
info_name = '_'.join([dataset, librispeech_md_file.strip('.csv'),
'info']) + '.csv'
info_save_path = os.path.join(md_dir, info_name)
print(f"Creating {os.path.basename(save_path)} file in {md_dir}")
# Create dataframe
mixtures_md, mixtures_info = create_librimix_df(
librispeech_md, librispeech_dir, wham_md, wham_dir,
n_src)
# Round number of files
mixtures_md = mixtures_md[:len(mixtures_md) // 100 * 100]
mixtures_info = mixtures_info[:len(mixtures_info) // 100 * 100]
# Save csv files
mixtures_md.to_csv(save_path, index=False)
mixtures_info.to_csv(info_save_path, index=False)
def check_already_generated(md_dir, dataset, to_be_ignored,
librispeech_md_files):
# Check if the metadata files in LibriSpeech already have been used
already_generated = os.listdir(md_dir)
for generated in already_generated:
if generated.startswith(f"{dataset}") and 'info' not in generated:
if 'train-100' in generated:
to_be_ignored.append('train-clean-100.csv')
elif 'train-360' in generated:
to_be_ignored.append('train-clean-360.csv')
elif 'dev' in generated:
to_be_ignored.append('dev-clean.csv')
elif 'test' in generated:
to_be_ignored.append('test-clean.csv')
print(f"{generated} already exists in "
f"{md_dir} it won't be overwritten")
for element in to_be_ignored:
librispeech_md_files.remove(element)
def create_librimix_df(librispeech_md_file, librispeech_dir,
wham_md_file, wham_dir, n_src):
""" Generate librimix dataframe from a LibriSpeech and wha md file"""
# Create a dataframe that will be used to generate sources and mixtures
    mixtures_md = pd.DataFrame(columns=['mixture_ID'])
import streamlit as st
import pandas as pd
import numpy as np
from datetime import datetime
import folium
from streamlit_folium import folium_static
from folium.plugins import MarkerCluster
import plotly.express as px
st.set_page_config(layout='wide')
st.title('House Rocket Company')
st.markdown('Welcome to House Rocket Data Analysis.')
@st.cache(allow_output_mutation=True)
def get_data(path_of_data):
data_raw = pd.read_csv(path_of_data)
return data_raw
def set_feature(data):
# add new features
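    # sqft_lot is in square feet; dividing by 10.764 converts it to square meters
    # (1 m^2 = 10.764 sq ft), so price_m2 is the price per square meter of the lot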
data['price_m2'] = data['price'] / (data['sqft_lot'] / 10.764)
return data
def overview_data(data):
data_r = data.copy()
data['date'] = pd.to_datetime(data['date'])
f_attributes = st.sidebar.multiselect('Columns', data.columns)
f_zipcode = st.sidebar.multiselect('Zipcode', data['zipcode'].unique())
st.title('Data Overview')
if (f_zipcode != []) and (f_attributes != []):
data = data.loc[data['zipcode'].isin(f_zipcode), f_attributes]
elif (f_zipcode != []) and (f_attributes == []):
data = data.loc[data['zipcode'].isin(f_zipcode), :]
elif (f_zipcode == []) and (f_attributes != []):
data = data.loc[:, f_attributes]
else:
data = data.loc[:, :]
st.dataframe(data)
c1, c2 = st.columns((2, 1))
# Average
df1 = data_r[['id', 'zipcode']].groupby('zipcode').count().reset_index()
df2 = data_r[['price', 'zipcode']].groupby('zipcode').mean().reset_index()
df3 = data_r[['sqft_living', 'zipcode']].groupby('zipcode').mean().reset_index()
df4 = data_r[['price_m2', 'zipcode']].groupby('zipcode').mean().reset_index()
# Merge
m1 = pd.merge(df1, df2, on='zipcode', how='inner')
m2 = pd.merge(m1, df3, on='zipcode', how='inner')
df_new = pd.merge(m2, df4, on='zipcode', how='inner')
df_new.columns = ['zipcode', 'total_houses', 'm_price', 'm_sqft_living', 'm_price_m2']
c1.header('Per Zipcode')
c1.dataframe(df_new.head(10), height=600)
# Descriptive Statistic
num_attributes = data_r.select_dtypes(include=['int64', 'float'])
media = pd.DataFrame(num_attributes.apply(np.mean))
mediana = pd.DataFrame(num_attributes.apply(np.median))
std = pd.DataFrame(num_attributes.apply(np.std))
max_ = pd.DataFrame(num_attributes.apply(np.max))
min_ = pd.DataFrame(num_attributes.apply(np.min))
    df_descriptive = pd.concat([media, mediana, max_, min_, std], axis=1)
# Regression and classification plugins based on CatBoost
import pandas as pd
import numpy as np
import os.path
import sklearn.metrics
from sklearn.model_selection import train_test_split
from sklearn.metrics import (
mean_squared_error,
mean_absolute_error,
median_absolute_error,
accuracy_score,
precision_score,
recall_score,
classification_report,
confusion_matrix,
)
import catboost
from catboost import CatBoostClassifier, CatBoostRegressor
from analitico.utilities import time_ms
import analitico.pandas
import analitico.schema
from analitico.schema import generate_schema, ANALITICO_TYPE_CATEGORY, ANALITICO_TYPE_INTEGER, ANALITICO_TYPE_FLOAT
from .interfaces import (
IAlgorithmPlugin,
PluginError,
plugin,
ALGORITHM_TYPE_REGRESSION,
ALGORITHM_TYPE_BINARY_CLASSICATION,
ALGORITHM_TYPE_MULTICLASS_CLASSIFICATION,
)
##
## CatBoostPlugin
##
@plugin
class CatBoostPlugin(IAlgorithmPlugin):
""" Base class for CatBoost regressor and classifier plugins """
results = None
class Meta(IAlgorithmPlugin.Meta):
name = "analitico.plugin.CatBoostPlugin"
algorithms = [
ALGORITHM_TYPE_REGRESSION,
ALGORITHM_TYPE_BINARY_CLASSICATION,
ALGORITHM_TYPE_MULTICLASS_CLASSIFICATION,
]
def create_model(self, results):
""" Creates actual CatBoostClassifier or CatBoostRegressor model """
iterations = self.get_attribute("parameters.iterations", 50)
learning_rate = self.get_attribute("parameters.learning_rate", 1)
depth = self.get_attribute("parameters.depth", 8)
if results:
results["parameters"]["iterations"] = iterations
results["parameters"]["learning_rate"] = learning_rate
results["parameters"]["depth"] = depth
algo = results.get("algorithm", ALGORITHM_TYPE_REGRESSION)
if algo == ALGORITHM_TYPE_REGRESSION:
return CatBoostRegressor(iterations=iterations, learning_rate=learning_rate, depth=depth)
elif algo == ALGORITHM_TYPE_BINARY_CLASSICATION:
# task_type="GPU", # runtime will pick up the GPU even if we don't specify it here
return CatBoostClassifier(
iterations=iterations, learning_rate=learning_rate, depth=depth, loss_function="Logloss"
)
elif algo == ALGORITHM_TYPE_MULTICLASS_CLASSIFICATION:
return CatBoostClassifier(
iterations=iterations, learning_rate=learning_rate, depth=depth, loss_function="MultiClass"
)
else:
raise PluginError("CatBoostPlugin.create_model - can't handle algorithm type: %s", results["algorithm"])
def get_categorical_idx(self, df):
""" Return indexes of the columns that should be considered categorical for the purpose of catboost training """
categorical_idx = []
for i, column in enumerate(df.columns):
if analitico.schema.get_column_type(df, column) is analitico.schema.ANALITICO_TYPE_CATEGORY:
categorical_idx.append(i)
df[column].replace(np.nan, "", regex=True, inplace=True)
self.factory.debug("%3d %s (%s/categorical)", i, column, df[column].dtype.name)
else:
self.factory.debug("%3d %s (%s)", i, column, df[column].dtype.name)
return categorical_idx
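    # Illustrative sketch (not part of the original plugin) of how the indexes returned by
    # get_categorical_idx are consumed further down; the toy dataframe below is an assumption:
    # df = pd.DataFrame({"color": pd.Series(["red", "blue"], dtype="category"), "size": [1.0, 2.0]})
    # cat_idx = plugin.get_categorical_idx(df)  # -> [0]
    # pool = catboost.Pool(df, label=[0, 1], cat_features=cat_idx)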
def validate_schema(self, train_df, test_df):
""" Checks training and test dataframes to make sure they have matching schemas """
train_schema = generate_schema(train_df)
if test_df:
test_schema = generate_schema(test_df)
train_columns = train_schema["columns"]
test_columns = test_schema["columns"]
if len(train_columns) != len(test_columns):
msg = "{} - training data has {} columns while test data has {} columns".format(
self.name, len(train_columns), len(test_columns)
)
raise PluginError(msg)
for i in range(0, len(train_columns)):
if train_columns[i]["name"] != test_columns[i]["name"]:
msg = "{} - column {} of train '{}' and test '{}' have different names".format(
self.name, i, train_columns[i]["name"], test_columns[i]["name"]
)
raise PluginError(msg)
if train_columns[i]["type"] != test_columns[i]["type"]:
msg = "{} - column {} of train '{}' and test '{}' have different names".format(
self.name, i, train_columns[i]["type"], test_columns[i]["type"]
)
raise PluginError(msg)
return train_schema
def score_training(
self,
model: catboost.CatBoost,
test_df: pd.DataFrame,
test_pool: catboost.Pool,
test_labels: pd.DataFrame,
results: dict,
):
""" Scores the results of this training """
for key, value in model.get_params().items():
results["parameters"][key] = value
results["scores"]["best_iteration"] = model.get_best_iteration()
best_score = model.get_best_score()
try:
best_score["training"] = best_score.pop("learn")
best_score["validation"] = best_score.pop("validation_0")
except KeyError:
pass
results["scores"]["best_score"] = best_score
# result for each evaluation epoch
evals_result = model.get_evals_result()
try:
evals_result["training"] = evals_result.pop("learn")
evals_result["validation"] = evals_result.pop("validation_0")
except KeyError:
pass
results["scores"]["iterations"] = evals_result
# catboost can tell which features weigh more heavily on the predictions
self.info("features importance:")
features_importance = results["scores"]["features_importance"] = {}
for label, importance in model.get_feature_importance(prettified=True):
features_importance[label] = round(importance, 5)
self.info("%24s: %8.4f", label, importance)
# make the prediction using the resulting model
# output test set with predictions
# after moving label to the end for easier reading
test_predictions = model.predict(test_pool)
label = test_labels.name
test_df[label] = test_labels
cols = list(test_df.columns.values)
cols.pop(cols.index(label))
test_df = test_df[cols + [label]]
test_df["prediction"] = test_predictions
test_df = analitico.pandas.pd_sample(test_df, 200) # just sampling
artifacts_path = self.factory.get_artifacts_directory()
test_df.to_csv(os.path.join(artifacts_path, "test.csv"))
def score_regressor_training(self, model, test_df, test_pool, test_labels, results):
test_preds = model.predict(test_pool)
results["scores"]["median_abs_error"] = round(median_absolute_error(test_preds, test_labels), 5)
results["scores"]["mean_abs_error"] = round(mean_absolute_error(test_preds, test_labels), 5)
results["scores"]["sqrt_mean_squared_error"] = round(np.sqrt(mean_squared_error(test_preds, test_labels)), 5)
def score_classifier_training(self, model, test_df, test_pool, test_labels, results):
""" Scores the results of this training for the CatBoostClassifier model """
# There are many metrics available:
# https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics
scores = results["scores"]
train_classes = results["data"]["classes"] # the classes (actual strings)
train_classes_codes = list(range(0, len(train_classes))) # the codes, eg: 0, 1, 2...
test_true = list(test_labels) # test true labels
test_preds = model.predict(test_pool, prediction_type="Class") # prediction for each test sample
test_probs = model.predict_proba(test_pool, verbose=True) # probability for each class for each sample
# Log loss, aka logistic loss or cross-entropy loss.
scores["log_loss"] = round(sklearn.metrics.log_loss(test_true, test_probs, labels=train_classes_codes), 5)
# In multilabel classification, this function computes subset accuracy:
# the set of labels predicted for a sample must exactly match the corresponding set of labels in y_true.
scores["accuracy_score"] = round(accuracy_score(test_true, test_preds), 5)
# The precision is the ratio tp / (tp + fp) where tp is the number of true positives
# and fp the number of false positives. The precision is intuitively the ability
# of the classifier not to label as positive a sample that is negative.
# The best value is 1 and the worst value is 0.
scores["precision_score_micro"] = round(precision_score(test_true, test_preds, average="micro"), 5)
scores["precision_score_macro"] = round(precision_score(test_true, test_preds, average="macro"), 5)
scores["precision_score_weighted"] = round(precision_score(test_true, test_preds, average="weighted"), 5)
# The recall is the ratio tp / (tp + fn) where tp is the number of true positives
# and fn the number of false negatives. The recall is intuitively the ability
# of the classifier to find all the positive samples.
scores["recall_score_micro"] = round(recall_score(test_true, test_preds, average="micro"), 5)
scores["recall_score_macro"] = round(recall_score(test_true, test_preds, average="macro"), 5)
scores["recall_score_weighted"] = round(recall_score(test_true, test_preds, average="weighted"), 5)
self.info("log_loss: %f", scores["log_loss"])
self.info("accuracy_score: %f", scores["accuracy_score"])
self.info("precision_score_micro: %f", scores["precision_score_micro"])
self.info("precision_score_macro: %f", scores["precision_score_macro"])
# complete classification report and confusion matrix
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html#sklearn.metrics.classification_report
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html#sklearn.metrics.confusion_matrix
scores["classification_report"] = classification_report(
test_true, test_preds, target_names=train_classes, output_dict=True
)
scores["confusion_matrix"] = confusion_matrix(test_true, test_preds).tolist()
def train(self, train, test, results, *args, **kwargs):
""" Train with algorithm and given data to produce a trained model """
try:
assert isinstance(train, pd.DataFrame) and len(train.columns) > 1
train_df = train
test_df = test
# if not specified the prediction target will be the last column of the dataset
label = self.get_attribute("data.label")
if not label:
label = train_df.columns[len(train_df.columns) - 1]
results["data"]["label"] = label
# choose between regression, binary classification and multiclass classification
label_type = analitico.schema.get_column_type(train_df, label)
self.info("label: %s", label)
self.info("label_type: %s", label_type)
if label_type == analitico.schema.ANALITICO_TYPE_CATEGORY:
label_classes = list(train_df[label].cat.categories)
results["data"]["classes"] = label_classes
train_df[label] = train_df[label].cat.codes
results["algorithm"] = (
ALGORITHM_TYPE_BINARY_CLASSICATION
if len(label_classes) == 2
else ALGORITHM_TYPE_MULTICLASS_CLASSIFICATION
)
self.info("classes: %s", label_classes)
else:
results["algorithm"] = ALGORITHM_TYPE_REGRESSION
self.info("algorithm: %s", results["algorithm"])
# remove rows with missing label from training and test sets
train_rows = len(train_df)
train_df = train_df.dropna(subset=[label])
if len(train_df) < train_rows:
self.warning("Training data has %s rows without '%s' label", train_rows - len(train_df), label)
if test_df:
test_rows = len(test_df)
test_df = test_df.dropna(subset=[label])
if len(test_df) < test_rows:
self.warning("Test data has %s rows without '%s' label", test_rows - len(test_df), label)
# make sure schemas match
train_schema = self.validate_schema(train_df, test_df)
# shortened training was requested?
tail = self.get_attribute("parameters.tail", 0)
if tail > 0:
self.info("Tail: %d, cutting training data", tail)
train_df = train_df.tail(tail).copy()
# create test set from training set if not provided
if not test_df:
# decide how to create test set from settings variable
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.TimeSeriesSplit.html
chronological = self.get_attribute("data.chronological", False)
test_size = self.get_attribute("parameters.test_size", 0.20)
results["data"]["chronological"] = chronological
results["parameters"]["test_size"] = test_size
if chronological:
# test set if from the last rows (chronological order)
self.info("Test set split: chronological")
test_rows = int(len(train_df) * test_size)
test_df = train_df[-test_rows:]
train_df = train_df[:-test_rows]
else:
# test set if from a random assortment of rows
self.info("Test set split: random")
train_df, test_df, = train_test_split(train_df, test_size=test_size, random_state=42)
self.info("training: %d rows", len(train_df))
self.info("testing: %d rows", len(test_df))
# validate data types
for column in train_schema["columns"]:
if column["type"] not in ("integer", "float", "boolean", "category"):
self.warning(
"Column '%s' of type '%s' is incompatible and will be dropped", column["name"], column["type"]
)
train_df = train_df.drop(column["name"], axis=1)
test_df = test_df.drop(column["name"], axis=1)
# save schema after dropping unused columns
results["data"]["schema"] = generate_schema(train_df)
results["data"]["source_records"] = len(train)
results["data"]["training_records"] = len(train_df)
results["data"]["test_records"] = len(test_df)
results["data"]["dropped_records"] = len(train) - len(train_df) - len(test_df)
# save some training data for debugging
artifacts_path = self.factory.get_artifacts_directory()
self.info("artifacts_path: %s", artifacts_path)
samples_df = analitico.pandas.pd_sample(train_df, 200)
samples_path = os.path.join(artifacts_path, "training-samples.json")
samples_df.to_json(samples_path, orient="records")
self.info("saved: %s (%d bytes)", samples_path, os.path.getsize(samples_path))
samples_path = os.path.join(artifacts_path, "training-samples.csv")
samples_df.to_csv(samples_path)
self.info("saved: %s (%d bytes)", samples_path, os.path.getsize(samples_path))
# split data and labels
train_labels = train_df[label]
train_df = train_df.drop([label], axis=1)
test_labels = test_df[label]
test_df = test_df.drop([label], axis=1)
# indexes of columns that should be considered categorical
categorical_idx = self.get_categorical_idx(train_df)
train_pool = catboost.Pool(train_df, train_labels, cat_features=categorical_idx)
test_pool = catboost.Pool(test_df, test_labels, cat_features=categorical_idx)
# create regressor or classificator then train
training_on = time_ms()
model = self.create_model(results)
model.fit(train_pool, eval_set=test_pool)
results["performance"]["training_ms"] = time_ms(training_on)
# score test set, add related metrics to results
self.score_training(model, test_df, test_pool, test_labels, results)
if results["algorithm"] == ALGORITHM_TYPE_REGRESSION:
self.score_regressor_training(model, test_df, test_pool, test_labels, results)
else:
self.score_classifier_training(model, test_df, test_pool, test_labels, results)
# save model file and training results
model_path = os.path.join(artifacts_path, "model.cbm")
model.save_model(model_path)
results["scores"]["model_size"] = os.path.getsize(model_path)
self.info("saved: %s (%d bytes)", model_path, os.path.getsize(model_path))
return results
except Exception as exc:
self.exception("CatBoostPlugin - error while training: %s", str(exc), exception=exc)
def predict(self, data, training, results, *args, **kwargs):
""" Return predictions from trained model """
        # data should already come in as a pd.DataFrame; if it is just a dictionary, convert it
if not isinstance(data, pd.DataFrame):
            data = pd.DataFrame.from_dict(data, orient="columns")
from utils.model import Perceptron
from utils.all_utils import prepare_data,save_model,save_plot
import pandas as pd
import numpy as np
def main(data,eta,epochs,filename,plot_filename):
    df = pd.DataFrame(data)
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
            index=Index(["q", "x", "y", "z"]),
        )
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals # NOQA
import plottool as pt
import utool as ut
from ibeis.algo.verif import vsone
from ibeis.scripts._thesis_helpers import DBInputs
from ibeis.scripts.thesis import Sampler # NOQA
from ibeis.scripts._thesis_helpers import Tabular, upper_one, ave_str
from ibeis.scripts._thesis_helpers import dbname_to_species_nice
from ibeis.scripts._thesis_helpers import TMP_RC, W, H, DPI
from ibeis.algo.graph.state import POSTV, NEGTV, INCMP, UNREV # NOQA
import numpy as np # NOQA
import pandas as pd
import ubelt as ub # NOQA
import itertools as it
import matplotlib as mpl
from os.path import basename, join, splitext, exists # NOQA
import ibeis.constants as const
import vtool as vt
from ibeis.algo.graph.state import POSTV, NEGTV, INCMP, UNREV, UNKWN # NOQA
(print, rrr, profile) = ut.inject2(__name__)
CLF = 'VAMP'
LNBNN = 'LNBNN'
def turk_pz():
import ibeis
ibs = ibeis.opendb('GZ_Master1')
infr = ibeis.AnnotInference(ibs, aids='all')
infr.reset_feedback('staging', apply=True)
infr.relabel_using_reviews(rectify=True)
# infr.apply_nondynamic_update()
print(ut.repr4(infr.status()))
infr.ibeis_delta_info()
infr.match_state_delta()
infr.get_ibeis_name_delta()
infr.relabel_using_reviews(rectify=True)
infr.write_ibeis_annotmatch_feedback()
infr.write_ibeis_name_assignment()
pass
@ut.reloadable_class
class GraphExpt(DBInputs):
"""
TODO:
- [ ] Experimental analysis of duration of each phase and state of
graph.
- [ ] Experimental analysis of phase 3, including how far we can get
with automatic decision making and do we discover new merges? If
there are potential merges, can we run phase iii with exactly the
same ordering as before: ordering by probability for automatically
decidable and then by positive probability for others. This should
work for phase 3 and therefore allow a clean combination of the
three phases and our termination criteria. I just thought of this
so don't really have it written cleanly above.
- [ ] Experimental analysis of choice of automatic decision thresholds.
by lowering the threshold we increase the risk of mistakes. Each
mistake costs some number of manual reviews (perhaps 2-3), but if
the frequency of errors is low then we could be saving ourselves a
lot of manual reviews.
\item OTHER SPECIES
CommandLine:
python -m ibeis GraphExpt.measure all PZ_MTEST
Ignore:
>>> from ibeis.scripts.postdoc import *
>>> self = GraphExpt('PZ_MTEST')
>>> self._precollect()
>>> self._setup()
"""
base_dpath = ut.truepath('~/Desktop/graph_expt')
def _precollect(self):
if self.ibs is None:
_GraphExpt = ut.fix_super_reload(GraphExpt, self)
super(_GraphExpt, self)._precollect()
# Split data into a training and testing test
ibs = self.ibs
annots = ibs.annots(self.aids_pool)
names = list(annots.group_items(annots.nids).values())
ut.shuffle(names, rng=321)
train_names, test_names = names[0::2], names[1::2]
train_aids, test_aids = map(ut.flatten, (train_names, test_names))
self.test_train = train_aids, test_aids
params = {}
self.pblm = vsone.OneVsOneProblem.from_aids(
ibs, train_aids, **params)
# ut.get_nonconflicting_path(dpath, suffix='_old')
self.const_dials = {
# 'oracle_accuracy' : (0.98, 1.0),
# 'oracle_accuracy' : (0.98, .98),
'oracle_accuracy' : (0.99, .99),
'k_redun' : 2,
'max_outer_loops' : np.inf,
# 'max_outer_loops' : 1,
}
config = ut.dict_union(self.const_dials)
cfg_prefix = '{}_{}'.format(len(test_aids), len(train_aids))
self._setup_links(cfg_prefix, config)
def _setup(self):
"""
python -m ibeis GraphExpt._setup
Example:
>>> from ibeis.scripts.postdoc import *
>>> #self = GraphExpt('GZ_Master1')
>>> self = GraphExpt('PZ_MTEST')
>>> self = GraphExpt('PZ_Master1')
>>> self._setup()
"""
self._precollect()
train_aids, test_aids = self.test_train
task_key = 'match_state'
pblm = self.pblm
data_key = pblm.default_data_key
clf_key = pblm.default_clf_key
pblm.eval_data_keys = [data_key]
pblm.setup(with_simple=False)
pblm.learn_evaluation_classifiers()
res = pblm.task_combo_res[task_key][clf_key][data_key]
# pblm.report_evaluation()
# TODO: need more principled way of selecting thresholds
# graph_thresh = res.get_pos_threshes('fpr', 0.01)
graph_thresh = res.get_pos_threshes('fpr', 0.001)
# rankclf_thresh = res.get_pos_threshes(fpr=0.01)
# Load or create the deploy classifiers
clf_dpath = ut.ensuredir((self.dpath, 'clf'))
classifiers = pblm.ensure_deploy_classifiers(dpath=clf_dpath)
sim_params = {
'test_aids': test_aids,
'train_aids': train_aids,
'classifiers': classifiers,
'graph_thresh': graph_thresh,
# 'rankclf_thresh': rankclf_thresh,
'const_dials': self.const_dials,
}
self.pblm = pblm
self.sim_params = sim_params
return sim_params
def measure_all(self):
self.measure_graphsim()
@profile
def measure_graphsim(self):
"""
CommandLine:
python -m ibeis GraphExpt.measure graphsim GZ_Master1
Ignore:
>>> from ibeis.scripts.postdoc import *
>>> #self = GraphExpt('PZ_MTEST')
>>> #self = GraphExpt('GZ_Master1')
>>> self = GraphExpt.measure('graphsim', 'PZ_Master1')
>>> self = GraphExpt.measure('graphsim', 'GZ_Master1')
>>> self = GraphExpt.measure('graphsim', 'PZ_MTEST')
"""
import ibeis
self.ensure_setup()
ibs = self.ibs
sim_params = self.sim_params
classifiers = sim_params['classifiers']
test_aids = sim_params['test_aids']
graph_thresh = sim_params['graph_thresh']
const_dials = sim_params['const_dials']
sim_results = {}
verbose = 1
# ----------
# Graph test
dials1 = ut.dict_union(const_dials, {
'name' : 'graph',
'enable_inference' : True,
'match_state_thresh' : graph_thresh,
})
infr1 = ibeis.AnnotInference(ibs=ibs, aids=test_aids, autoinit=True,
verbose=verbose)
infr1.enable_auto_prioritize_nonpos = True
infr1.params['refresh.window'] = 20
infr1.params['refresh.thresh'] = 0.052
infr1.params['refresh.patience'] = 72
infr1.params['redun.enforce_pos'] = True
infr1.params['redun.enforce_neg'] = True
infr1.init_simulation(classifiers=classifiers, **dials1)
infr1.init_test_mode()
infr1.reset(state='empty')
# if False:
# infr = infr1
# infr.init_refresh()
# n_prioritized = infr.refresh_candidate_edges()
# gen = infr.lnbnn_priority_gen(use_refresh=True)
# next(gen)
# edge = (25, 118)
list(infr1.main_gen())
# infr1.main_loop()
sim_results['graph'] = self._collect_sim_results(infr1, dials1)
# ------------
# Dump experiment output to disk
expt_name = 'graphsim'
self.expt_results[expt_name] = sim_results
ut.ensuredir(self.dpath)
ut.save_data(join(self.dpath, expt_name + '.pkl'), sim_results)
def _collect_sim_results(self, infr, dials):
pred_confusion = pd.DataFrame(infr.test_state['confusion'])
pred_confusion.index.name = 'real'
pred_confusion.columns.name = 'pred'
print('Edge confusion')
print(pred_confusion)
expt_data = {
'real_ccs': list(infr.nid_to_gt_cc.values()),
'pred_ccs': list(infr.pos_graph.connected_components()),
'graph': infr.graph.copy(),
'dials': dials,
'refresh_thresh': infr.refresh._prob_any_remain_thresh,
'metrics': infr.metrics_list,
}
return expt_data
def draw_graphsim(self):
"""
CommandLine:
python -m ibeis GraphExpt.measure graphsim GZ_Master1
python -m ibeis GraphExpt.draw graphsim GZ_Master1 --diskshow
python -m ibeis GraphExpt.draw graphsim PZ_MTEST --diskshow
python -m ibeis GraphExpt.draw graphsim GZ_Master1 --diskshow
python -m ibeis GraphExpt.draw graphsim PZ_Master1 --diskshow
Ignore:
>>> from ibeis.scripts.postdoc import *
>>> self = GraphExpt('GZ_Master1')
>>> self = GraphExpt('PZ_MTEST')
"""
sim_results = self.ensure_results('graphsim')
metric_nice = {
'n_errors': '# errors',
'n_manual': '# manual reviews',
'frac_mistake_aids': 'fraction error annots',
'merge_remain': 'fraction of merges remain',
}
# keys = ['ranking', 'rank+clf', 'graph']
# keycols = ['red', 'orange', 'b']
keys = ['graph']
keycols = ['b']
colors = ut.dzip(keys, keycols)
dfs = {k: pd.DataFrame(v['metrics'])
for k, v in sim_results.items()}
n_aids = sim_results['graph']['graph'].number_of_nodes()
df = dfs['graph']
df['frac_mistake_aids'] = df.n_mistake_aids / n_aids
# mdf = pd.concat(dfs.values(), keys=dfs.keys())
import xarray as xr
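# Stack the per-key metric DataFrames into one labeled array so a single
# .sel(metric=...) below pulls aligned x/y series for each key.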
panel = xr.concat(
[xr.DataArray(df, dims=('ts', 'metric'))
for df in dfs.values()],
dim=pd.Index(list(dfs.keys()), name='key')
)
xmax = panel.sel(metric='n_manual').values.max()
xpad = (1.01 * xmax) - xmax
pnum_ = pt.make_pnum_nextgen(nSubplots=2)
mpl.rcParams.update(TMP_RC)
fnum = 1
pt.figure(fnum=fnum, pnum=pnum_())
ax = pt.gca()
xkey, ykey = 'n_manual', 'merge_remain'
datas = panel.sel(metric=[xkey, ykey])
for key in keys:
ax.plot(*datas.sel(key=key).values.T, label=key, color=colors[key])
ax.set_ylim(0, 1)
ax.set_xlim(-xpad, xmax + xpad)
ax.set_xlabel(metric_nice[xkey])
ax.set_ylabel(metric_nice[ykey])
ax.legend()
pt.figure(fnum=fnum, pnum=pnum_())
ax = pt.gca()
xkey, ykey = 'n_manual', 'frac_mistake_aids'
datas = panel.sel(metric=[xkey, ykey])
for key in keys:
ax.plot(*datas.sel(key=key).values.T, label=key, color=colors[key])
ax.set_ylim(0, datas.T[1].max() * 1.01)
ax.set_xlim(-xpad, xmax + xpad)
ax.set_xlabel(metric_nice[xkey])
ax.set_ylabel(metric_nice[ykey])
ax.legend()
fig = pt.gcf() # NOQA
fig.set_size_inches([W, H * .75])
pt.adjust_subplots(wspace=.25, fig=fig)
fpath = join(self.dpath, 'simulation.png')
vt.imwrite(fpath, pt.render_figure_to_image(fig, dpi=DPI))
if ut.get_argflag('--diskshow'):
ut.startfile(fpath)
def draw_graphsim2(self):
"""
CommandLine:
python -m ibeis GraphExpt.draw graphsim2 --db PZ_MTEST --diskshow
python -m ibeis GraphExpt.draw graphsim2 GZ_Master1 --diskshow
python -m ibeis GraphExpt.draw graphsim2 PZ_Master1 --diskshow
Example:
>>> from ibeis.scripts.thesis import *
>>> dbname = ut.get_argval('--db', default='GZ_Master1')
>>> self = GraphExpt(dbname)
>>> self.draw_graphsim2()
>>> ut.show_if_requested()
"""
mpl.rcParams.update(TMP_RC)
sim_results = self.ensure_results('graphsim')
expt_data = sim_results['graph']
metrics_df = pd.DataFrame.from_dict(expt_data['metrics'])
# n_aids = sim_results['graph']['graph'].number_of_nodes()
# metrics_df['frac_mistake_aids'] = metrics_df.n_mistake_aids / n_aids
fnum = 1 # NOQA
default_flags = {
'phase': True,
'pred': False,
'user': True,
'real': True,
'error': 0,
'recover': 1,
}
def plot_intervals(flags, color=None, low=0, high=1):
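# Shades each contiguous run of True values in `flags` as a filled band
# between `low` and `high` on the current axes; used below to overlay
# algorithm-state timelines on the metric curves.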
ax = pt.gca()
idxs = np.where(flags)[0]
ranges = ut.group_consecutives(idxs)
bounds = [(min(a), max(a)) for a in ranges if len(a) > 0]
xdata_ = xdata.values
xs, ys = [xdata_[0]], [low]
for a, b in bounds:
x1, x2 = xdata_[a], xdata_[b]
# if x1 == x2:
x1 -= .5
x2 += .5
xs.extend([x1, x1, x2, x2])
ys.extend([low, high, high, low])
xs.append(xdata_[-1])
ys.append(low)
ax.fill_between(xs, ys, low, alpha=.6, color=color)
def overlay_actions(ymax=1, kw=None):
"""
Draws indicators that detail the algorithm state at given
timestamps.
"""
phase = metrics_df['phase'].map(
lambda x: x.split('_')[0])
is_correct = metrics_df['test_action'].map(
lambda x: x.startswith('correct')).values
recovering = metrics_df['recovering'].values
is_auto = metrics_df['user_id'].map(
lambda x: x.startswith('algo:')).values
ppos = metrics_df['pred_decision'].map(
lambda x: x == POSTV).values
rpos = metrics_df['true_decision'].map(
lambda x: x == POSTV).values
# ymax = max(metrics_df['n_errors'])
if kw is None:
kw = default_flags
num = sum(kw.values())
steps = np.linspace(0, 1, num + 1) * ymax
i = -1
def stacked_interval(data, color, i):
plot_intervals(data, color, low=steps[i], high=steps[i + 1])
if kw.get('user', False):
i += 1
pt.absolute_text((.2, steps[i:i + 2].mean()),
'user(algo=gold,manual=blue)')
stacked_interval(is_auto, 'gold', i)
stacked_interval(~is_auto, 'blue', i)
if kw.get('pred', False):
i += 1
pt.absolute_text((.2, steps[i:i + 2].mean()), 'pred_pos')
stacked_interval(ppos, 'aqua', i)
# stacked_interval(~ppos, 'salmon', i)
if kw.get('real', False):
i += 1
pt.absolute_text((.2, steps[i:i + 2].mean()), 'real_merge')
stacked_interval(rpos, 'lime', i)
# stacked_interval(~ppos, 'salmon', i)
if kw.get('error', False):
i += 1
pt.absolute_text((.2, steps[i:i + 2].mean()), 'is_error')
# stacked_interval(is_correct, 'blue', low=steps[i], high=steps[i + 1])
stacked_interval(~is_correct, 'red', i)
if kw.get('recover', False):
i += 1
pt.absolute_text((.2, steps[i:i + 2].mean()), 'is_recovering')
stacked_interval(recovering, 'orange', i)
if kw.get('phase', False):
i += 1
pt.absolute_text((.2, steps[i:i + 2].mean()),
'phase(1=yellow, 2=aqua, 3=pink)')
stacked_interval(phase == 'ranking', 'yellow', i)
stacked_interval(phase == 'posredun', 'aqua', i)
stacked_interval(phase == 'negredun', 'pink', i)
# stacked_interval(phase == 'ranking', 'red', i)
# stacked_interval(phase == 'posredun', 'green', i)
# stacked_interval(phase == 'negredun', 'blue', i)
def accuracy_plot(xdata, xlabel):
ydatas = ut.odict([
('Graph', metrics_df['merge_remain']),
])
pt.multi_plot(
xdata, ydatas, marker='', markersize=1,
xlabel=xlabel, ylabel='fraction of merge remaining',
ymin=0, rcParams=TMP_RC,
use_legend=True, fnum=1, pnum=pnum_(),
)
def error_plot(xdata, xlabel):
# ykeys = ['n_errors']
ykeys = ['frac_mistake_aids']
pt.multi_plot(
xdata, metrics_df[ykeys].values.T,
xlabel=xlabel,
ylabel='fraction error annots',
marker='', markersize=1, ymin=0, rcParams=TMP_RC,
fnum=1, pnum=pnum_(),
use_legend=False,
)
def refresh_plot(xdata, xlabel):
pt.multi_plot(
xdata, [metrics_df['pprob_any']],
label_list=['P(C=1)'],
xlabel=xlabel, ylabel='refresh criteria',
marker='', ymin=0, ymax=1, rcParams=TMP_RC,
fnum=1, pnum=pnum_(),
use_legend=False,
)
ax = pt.gca()
thresh = expt_data['refresh_thresh']
ax.plot([min(xdata), max(xdata)], [thresh, thresh], '-g',
label='refresh thresh')
ax.legend()
def error_breakdown_plot(xdata, xlabel):
ykeys = ['n_fn', 'n_fp']
pt.multi_plot(
xdata, metrics_df[ykeys].values.T,
label_list=ykeys,
xlabel=xlabel, ylabel='# of errors',
marker='x', markersize=1, ymin=0, rcParams=TMP_RC,
ymax=max(metrics_df['n_errors']),
fnum=1, pnum=pnum_(),
use_legend=True,
)
def neg_redun_plot(xdata, xlabel):
n_pred = len(sim_results['graph']['pred_ccs'])
z = (n_pred * (n_pred - 1)) / 2
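# z = number of unordered PCC pairs, i.e. the maximum possible number of
# negative-redundant meta-edges, used to normalize the counts below.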
metrics_df['p_neg_redun'] = metrics_df['n_neg_redun'] / z
metrics_df['p_neg_redun1'] = metrics_df['n_neg_redun1'] / z
ykeys = ['p_neg_redun', 'p_neg_redun1']
pt.multi_plot(
xdata, metrics_df[ykeys].values.T,
label_list=ykeys,
xlabel=xlabel, ylabel='% neg-redun-meta-edges',
marker='x', markersize=1, ymin=0, rcParams=TMP_RC,
ymax=max(metrics_df['p_neg_redun1']),
fnum=1, pnum=pnum_(),
use_legend=True,
)
pnum_ = pt.make_pnum_nextgen(nRows=2, nSubplots=6)
# --- ROW 1 ---
xdata = metrics_df['n_decision']
xlabel = '# decisions'
accuracy_plot(xdata, xlabel)
# overlay_actions(1)
error_plot(xdata, xlabel)
overlay_actions(max(metrics_df['frac_mistake_aids']))
# overlay_actions(max(metrics_df['n_errors']))
# refresh_plot(xdata, xlabel)
# overlay_actions(1, {'phase': True})
# error_breakdown_plot(xdata, xlabel)
neg_redun_plot(xdata, xlabel)
# --- ROW 2 ---
xdata = metrics_df['n_manual']
xlabel = '# manual reviews'
accuracy_plot(xdata, xlabel)
# overlay_actions(1)
error_plot(xdata, xlabel)
overlay_actions(max(metrics_df['frac_mistake_aids']))
# overlay_actions(max(metrics_df['n_errors']))
# refresh_plot(xdata, xlabel)
# overlay_actions(1, {'phase': True})
# error_breakdown_plot(xdata, xlabel)
neg_redun_plot(xdata, xlabel)
# fpath = join(self.dpath, expt_name + '2' + '.png')
# fig = pt.gcf() # NOQA
# fig.set_size_inches([W * 1.5, H * 1.1])
# vt.imwrite(fpath, pt.render_figure_to_image(fig, dpi=DPI))
# if ut.get_argflag('--diskshow'):
# ut.startfile(fpath)
# fig.save_fig
# if 1:
# pt.figure(fnum=fnum, pnum=(2, 2, 4))
# overlay_actions(ymax=1)
pt.set_figtitle(self.dbname)
fig = pt.gcf() # NOQA
fig.set_size_inches([W * 2, H * 2.5])
fig.suptitle(self.dbname)
pt.adjust_subplots(hspace=.25, wspace=.25, fig=fig)
fpath = join(self.dpath, 'graphsim2.png')
fig.savefig(fpath, dpi=DPI)
# vt.imwrite(fpath, pt.render_figure_to_image(fig, dpi=DPI))
if ut.get_argflag('--diskshow'):
ut.startfile(fpath)
def draw_match_states():
import ibeis
infr = ibeis.AnnotInference('PZ_Master1', 'all')
if infr.ibs.dbname == 'PZ_Master1':
# [UUID('0cb1ebf5-2a4f-4b80-b172-1b449b8370cf'),
# UUID('cd644b73-7978-4a5f-b570-09bb631daa75')]
chosen = {
POSTV: (17095, 17225),
NEGTV: (3966, 5080),
INCMP: (3197, 8455),
}
else:
infr.reset_feedback('staging')
chosen = {
POSTV: list(infr.pos_graph.edges())[0],
NEGTV: list(infr.neg_graph.edges())[0],
INCMP: list(infr.incmp_graph.edges())[0],
}
import plottool as pt
import vtool as vt
for key, edge in chosen.items():
match = infr._make_matches_from([edge], config={
'match_config': {'ratio_thresh': .7}})[0]
with pt.RenderingContext(dpi=300) as ctx:
match.show(heatmask=True, show_ell=False, show_ori=False,
show_lines=False)
vt.imwrite('matchstate_' + key + '.jpg', ctx.image)
def entropy_potential(infr, u, v, decision):
"""
Returns the number of edges this edge would invalidate
from ibeis.algo.graph import demo
infr = demo.demodata_infr(pcc_sizes=[5, 2, 4, 2, 2, 1, 1, 1])
infr.refresh_candidate_edges()
infr.params['redun.neg'] = 1
infr.params['redun.pos'] = 1
infr.apply_nondynamic_update()
ut.qtensure()
infr.show(show_cand=True, groupby='name_label')
u, v = 1, 7
decision = 'positive'
"""
nid1, nid2 = infr.pos_graph.node_labels(u, v)
# Cases for K=1
if decision == 'positive' and nid1 == nid2:
# The actual reduction is the number of edges previously needed to make
# the cc k-edge-connected vs how many it needs now.
# An edge within the same CC changes nothing
# (unless k > 1, in which case check edge connectivity)
return 0
elif decision == 'positive' and nid1 != nid2:
# Between two PCCs reduces the number of PCCs by one
n_ccs = infr.pos_graph.number_of_components()
# Find needed negative redundancy when apart
if infr.neg_redun_metagraph.has_node(nid1):
neg_redun_set1 = set(infr.neg_redun_metagraph.neighbors(nid1))
else:
neg_redun_set1 = set()
if infr.neg_redun_metagraph.has_node(nid2):
neg_redun_set2 = set(infr.neg_redun_metagraph.neighbors(nid2))
else:
neg_redun_set2 = set()
# The number of negative edges needed before we place this edge
# is the number of PCCs that each PCC doesn't have a negative edge to
# yet
n_neg_need1 = (n_ccs - len(neg_redun_set1) - 1)
n_neg_need2 = (n_ccs - len(neg_redun_set2) - 1)
n_neg_need_before = n_neg_need1 + n_neg_need2
# After we join them we take the union of their negative redundancy
# (really we should check if it changes after)
# and this is now the new number of negative edges that would be needed
neg_redun_after = neg_redun_set1.union(neg_redun_set2) - {nid1, nid2}
n_neg_need_after = (n_ccs - 2) - len(neg_redun_after)
neg_entropy = n_neg_need_before - n_neg_need_after # NOQA
def _find_good_match_states(infr, ibs, edges):
pos_edges = list(infr.pos_graph.edges())
timedelta = ibs.get_annot_pair_timedelta(*zip(*edges))
edges = ut.take(pos_edges, ut.argsort(timedelta))[::-1]
wgt = infr.qt_edge_reviewer(edges)
neg_edges = ut.shuffle(list(infr.neg_graph.edges()))
wgt = infr.qt_edge_reviewer(neg_edges)
if infr.incomp_graph.number_of_edges() > 0:
incmp_edges = list(infr.incomp_graph.edges())
if False:
ibs = infr.ibs
# a1, a2 = map(ibs.annots, zip(*incmp_edges))
# q1 = np.array(ut.replace_nones(a1.qual, np.nan))
# q2 = np.array(ut.replace_nones(a2.qual, np.nan))
# edges = ut.compress(incmp_edges,
# ((q1 > 3) | np.isnan(q1)) &
# ((q2 > 3) | np.isnan(q2)))
# a = ibs.annots(asarray=True)
# flags = [t is not None and 'right' == t for t in a.viewpoint_code]
# r = a.compress(flags)
# flags = [q is not None and q > 4 for q in r.qual]
rights = ibs.filter_annots_general(view='right',
minqual='excellent',
require_quality=True,
require_viewpoint=True)
lefts = ibs.filter_annots_general(view='left',
minqual='excellent',
require_quality=True,
require_viewpoint=True)
if False:
edges = list(infr._make_rankings(3197, rights))
wgt = infr.qt_edge_reviewer(edges)
edges = list(ut.random_product((rights, lefts), num=10, rng=0))
wgt = infr.qt_edge_reviewer(edges)
for edge in incmp_edges:
match = infr._make_matches_from([edge])[0]
# infr._debug_edge_gt(edge)
def prepare_cdfs(cdfs, labels):
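# CMC curves may have different lengths, so pad them to a common length
# with 1.0 (the cumulative match probability saturates at 1).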
cdfs = vt.pad_vstack(cdfs, fill_value=1)
# Sort so the best is on top
sortx = np.lexsort(cdfs.T[::-1])[::-1]
cdfs = cdfs[sortx]
labels = ut.take(labels, sortx)
return cdfs, labels
def plot_cmcs(cdfs, labels, fnum=1, pnum=(1, 1, 1), ymin=.4):
cdfs, labels = prepare_cdfs(cdfs, labels)
# Truncate to 20 ranks
num_ranks = min(cdfs.shape[-1], 20)
xdata = np.arange(1, num_ranks + 1)
cdfs_trunc = cdfs[:, 0:num_ranks]
label_list = ['%6.3f%% - %s' % (cdf[0] * 100, lbl)
for cdf, lbl in zip(cdfs_trunc, labels)]
# ymin = .4
num_yticks = (10 - int(ymin * 10)) + 1
pt.multi_plot(
xdata, cdfs_trunc, label_list=label_list,
xlabel='rank', ylabel='match probability',
use_legend=True, legend_loc='lower right', num_yticks=num_yticks,
ymax=1, ymin=ymin, ypad=.005, xmin=.9, num_xticks=5,
xmax=num_ranks + 1 - .5,
pnum=pnum, fnum=fnum,
rcParams=TMP_RC,
)
return pt.gcf()
@ut.reloadable_class
class VerifierExpt(DBInputs):
"""
Collect data from experiments to visualize
python -m ibeis VerifierExpt.measure all PZ_Master1.GZ_Master1,GIRM_Master1,MantaMatcher,RotanTurtles,humpbacks_fb,LF_ALL
python -m ibeis VerifierExpt.measure all GIRM_Master1,PZ_Master1,LF_ALL
python -m ibeis VerifierExpt.measure all LF_ALL
python -m ibeis VerifierExpt.measure all PZ_Master1
python -m ibeis VerifierExpt.measure all MantaMatcher
python -m ibeis VerifierExpt.draw all MantaMatcher
python -m ibeis VerifierExpt.draw rerank PZ_Master1
python -m ibeis VerifierExpt.measure all RotanTurtles
python -m ibeis VerifierExpt.draw all RotanTurtles
Ignore:
>>> from ibeis.scripts.postdoc import *
>>> fpath = ut.glob(ut.truepath('~/Desktop/mtest_plots'), '*.pkl')[0]
>>> self = ut.load_data(fpath)
"""
# base_dpath = ut.truepath('~/Desktop/pair_expts')
base_dpath = ut.truepath('~/latex/crall-iccvw-2017/figures')
agg_dbnames = [
'PZ_Master1',
'GZ_Master1',
# 'LF_ALL',
'MantaMatcher', 'RotanTurtles',
'humpbacks_fb', 'GIRM_Master1',
]
task_nice_lookup = {
'match_state': const.EVIDENCE_DECISION.CODE_TO_NICE,
'photobomb_state': {
'pb': 'Photobomb',
'notpb': 'Not Photobomb',
}
}
def _setup(self, quick=False):
r"""
CommandLine:
python -m ibeis VerifierExpt._setup --db GZ_Master1
python -m ibeis VerifierExpt._setup --db PZ_Master1 --eval
python -m ibeis VerifierExpt._setup --db PZ_MTEST
python -m ibeis VerifierExpt._setup --db PZ_PB_RF_TRAIN
python -m ibeis VerifierExpt.measure_all --db PZ_PB_RF_TRAIN
python -m ibeis VerifierExpt.measure all GZ_Master1
python -m ibeis VerifierExpt.measure all RotanTurtles --show
Example:
>>> from ibeis.scripts.postdoc import *
>>> dbname = ut.get_argval('--db', default='GZ_Master1')
>>> self = VerifierExpt(dbname)
>>> self._setup()
Ignore:
from ibeis.scripts.postdoc import *
self = VerifierExpt('PZ_Master1')
from ibeis.scripts.postdoc import *
self = VerifierExpt('PZ_PB_RF_TRAIN')
from ibeis.scripts.postdoc import *
self = VerifierExpt('LF_ALL')
self = VerifierExpt('RotanTurtles')
task = pblm.samples.subtasks['match_state']
ind_df = task.indicator_df
dist = ibs.get_annotedge_viewdist(ind_df.index.tolist())
np.all(ind_df[dist > 1]['notcomp'])
self.ibs.print_annot_stats(aids, prefix='P')
"""
self._precollect()
print('VerifierExpt _setup()')
ibs = self.ibs
aids = self.aids_pool
# pblm = vsone.OneVsOneProblem.from_aids(ibs, aids, sample_method='random')
pblm = vsone.OneVsOneProblem.from_aids(
ibs, aids,
sample_method='lnbnn+random',
# sample_method='random',
n_splits=10,
)
data_key = 'learn(sum)' # tests without global features
# data_key = 'learn(sum,glob)' # tests with global features
# data_key = pblm.default_data_key # same as learn(sum,glob)
clf_key = pblm.default_clf_key
pblm.eval_task_keys = ['match_state']
# test with and without globals
pblm.eval_data_keys = ['learn(sum)', 'learn(sum,glob)']
# pblm.eval_data_keys = [data_key]
pblm.eval_clf_keys = [clf_key]
ibs = pblm.infr.ibs
# pblm.samples.print_info()
species_code = ibs.get_database_species(pblm.infr.aids)[0]
if species_code == 'zebra_plains':
species = 'Plains Zebras'
elif species_code == 'zebra_grevys':
species = 'Grévy\'s Zebras'
else:
species = species_code
self.pblm = pblm
self.species = species
self.data_key = data_key
self.clf_key = clf_key
if quick:
return
pblm.setup_evaluation(with_simple=True)
pblm.report_evaluation()
self.eval_task_keys = pblm.eval_task_keys
cfg_prefix = '{}'.format(len(pblm.samples))
config = pblm.hyper_params
self._setup_links(cfg_prefix, config)
print('Finished setup')
@classmethod
def agg_dbstats(VerifierExpt):
"""
CommandLine:
python -m ibeis VerifierExpt.agg_dbstats
python -m ibeis VerifierExpt.measure_dbstats
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.scripts.postdoc import * # NOQA
>>> result = VerifierExpt.agg_dbstats()
>>> print(result)
"""
dfs = []
for dbname in VerifierExpt.agg_dbnames:
self = VerifierExpt(dbname)
info = self.ensure_results('dbstats', nocompute=False)
sample_info = self.ensure_results('sample_info', nocompute=False)
# info = self.measure_dbstats()
outinfo = info['outinfo']
task = sample_info['subtasks']['match_state']
y_ind = task.indicator_df
outinfo['Positive'] = (y_ind[POSTV]).sum()
outinfo['Negative'] = (y_ind[NEGTV]).sum()
outinfo['Incomparable'] = (y_ind[INCMP]).sum()
if outinfo['Database'] == 'mantas':
outinfo['Database'] = 'manta rays'
dfs.append(outinfo)
# labels.append(self.species_nice.capitalize())
df = pd.DataFrame(dfs)
print('df =\n{!r}'.format(df))
df = df.set_index('Database')
df.index.name = None
tabular = Tabular(df, colfmt='numeric')
tabular.theadify = 16
enc_text = tabular.as_tabular()
print(enc_text)
ut.write_to(join(VerifierExpt.base_dpath, 'agg-dbstats.tex'), enc_text)
_ = ut.render_latex(enc_text, dpath=self.base_dpath, fname='agg-dbstats',
preamb_extra=['\\usepackage{makecell}'])
_
# ut.startfile(_)
@classmethod
def agg_results(VerifierExpt, task_key):
"""
python -m ibeis VerifierExpt.agg_results
python -m ibeis VerifierExpt.agg_results --link link-paper-final
GZ_Master1,LF_ALL,MantaMatcher,RotanTurtles,humpbacks_fb,GIRM_Master1
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.scripts.postdoc import * # NOQA
>>> task_key = 'match_state'
>>> result = VerifierExpt.agg_results(task_key)
>>> print(result)
"""
VerifierExpt.agg_dbstats()
dbnames = VerifierExpt.agg_dbnames
all_results = ut.odict([])
for dbname in VerifierExpt.agg_dbnames:
self = VerifierExpt(dbname)
info = self.ensure_results('all')
all_results[dbname] = info
rerank_results = ut.odict([])
for dbname in VerifierExpt.agg_dbnames:
self = VerifierExpt(dbname)
info = self.ensure_results('rerank')
rerank_results[dbname] = info
rank_curves = ub.AutoOrderedDict()
rank1_cmc_table = pd.DataFrame(columns=[LNBNN, CLF])
rank5_cmc_table = pd.DataFrame(columns=[LNBNN, CLF])
n_dbs = len(all_results)
color_cycle = mpl.rcParams['axes.prop_cycle'].by_key()['color'][:n_dbs]
color_cycle = ['r', 'b', 'purple', 'orange', 'deeppink', 'g']
markers = pt.distinct_markers(n_dbs)
dbprops = ub.AutoDict()
for n, dbname in enumerate(dbnames):
dbprops[dbname]['color'] = color_cycle[n]
dbprops[dbname]['marker'] = markers[n]
def highlight_metric(metric, data1, data2):
# Highlight the bigger one for each metric
for d1, d2 in it.permutations([data1, data2], 2):
text = '{:.3f}'.format(d1[metric])
if d1[metric] >= d2[metric]:
d1[metric + '_tex'] = '\\mathbf{' + text + '}'
d1[metric + '_text'] = text + '*'
else:
d1[metric + '_tex'] = text
d1[metric + '_text'] = text
for dbname in dbnames:
results = all_results[dbname]
data_key = results['data_key']
clf_key = results['clf_key']
lnbnn_data = results['lnbnn_data']
task_combo_res = results['task_combo_res']
res = task_combo_res[task_key][clf_key][data_key]
nice = dbname_to_species_nice(dbname)
# ranking results
results = rerank_results[dbname]
cdfs, infos = list(zip(*results))
lnbnn_cdf, clf_cdf = cdfs
cdfs = {
CLF: clf_cdf,
LNBNN: lnbnn_cdf,
}
rank1_cmc_table.loc[nice, LNBNN] = lnbnn_cdf[0]
rank1_cmc_table.loc[nice, CLF] = clf_cdf[0]
rank5_cmc_table.loc[nice, LNBNN] = lnbnn_cdf[4]
rank5_cmc_table.loc[nice, CLF] = clf_cdf[4]
# Check the ROC for only things in the top of the LNBNN ranked lists
# nums = [1, 2, 3, 4, 5, 10, 20, np.inf]
nums = [1, 5, np.inf]
for num in nums:
ranks = lnbnn_data['rank_lnbnn_1vM'].values
sub_data = lnbnn_data[ranks <= num]
scores = sub_data['score_lnbnn_1vM'].values
y = sub_data[POSTV].values
probs = res.probs_df[POSTV].loc[sub_data.index].values
cfsm_vsm = vt.ConfusionMetrics().fit(scores, y)
cfsm_clf = vt.ConfusionMetrics().fit(probs, y)
algo_confusions = {
LNBNN: cfsm_vsm,
CLF: cfsm_clf
}
datas = []
for algo in {LNBNN, CLF}:
cfms = algo_confusions[algo]
data = {
'dbname': dbname,
'species': nice,
'fpr': cfms.fpr,
'tpr': cfms.tpr,
'auc': cfms.auc,
'cmc0': cdfs[algo][0],
'cmc': cdfs[algo],
'color': dbprops[dbname]['color'],
'marker': dbprops[dbname]['marker'],
'tpr@fpr=0': cfms.get_metric_at_metric(
'tpr', 'fpr', 0, tiebreaker='minthresh'),
'thresh@fpr=0': cfms.get_metric_at_metric(
'thresh', 'fpr', 0, tiebreaker='minthresh'),
}
rank_curves[num][algo][dbname] = data
datas.append(data)
# Highlight the bigger one for each metric
highlight_metric('auc', *datas)
highlight_metric('tpr@fpr=0', *datas)
highlight_metric('cmc0', *datas)
rank_auc_tables = ut.ddict(lambda: pd.DataFrame(columns=[LNBNN, CLF]))
rank_tpr_tables = ut.ddict(lambda: pd.DataFrame(columns=[LNBNN, CLF]))
rank_tpr_thresh_tables = ut.ddict(lambda: pd.DataFrame(columns=[LNBNN, CLF]))
for num in rank_curves.keys():
rank_auc_df = rank_auc_tables[num]
rank_auc_df.index.name = 'AUC@rank<={}'.format(num)
rank_tpr_df = rank_tpr_tables[num]
rank_tpr_df.index.name = 'tpr@fpr=0&rank<={}'.format(num)
rank_thesh_df = rank_tpr_thresh_tables[num]
rank_thesh_df.index.name = 'thresh@fpr=0&rank<={}'.format(num)
for algo in rank_curves[num].keys():
for dbname in rank_curves[num][algo].keys():
data = rank_curves[num][algo][dbname]
nice = data['species']
rank_auc_df.loc[nice, algo] = data['auc']
rank_tpr_df.loc[nice, algo] = data['tpr@fpr=0']
rank_thesh_df.loc[nice, algo] = data['thresh@fpr=0']
from utool.experimental.pandas_highlight import to_string_monkey
nums = [1]
for rank in nums:
print('-----')
print('AUC at rank = {!r}'.format(rank))
rank_auc_df = rank_auc_tables[rank]
print(to_string_monkey(rank_auc_df, 'all'))
print('===============')
for rank in nums:
print('-----')
print('TPR at rank = {!r}'.format(rank))
rank_tpr_df = rank_tpr_tables[rank]
print(to_string_monkey(rank_tpr_df, 'all'))
def _bf_best(df):
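# Wrap the per-row maxima in \mathbf{...} so the best entry in each row
# is bolded in the rendered LaTeX tables.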
df = df.copy()
for rx in range(len(df)):
col = df.iloc[rx]
for cx in ut.argmax(col.values, multi=True):
val = df.iloc[rx, cx]
df.iloc[rx, cx] = '\\mathbf{{{:.3f}}}'.format(val)
return df
if True:
# Tables
rank1_auc_table = rank_auc_tables[1]
rank1_tpr_table = rank_tpr_tables[1]
# all_stats = pd.concat(ut.emap(_bf_best, [auc_table, rank1_cmc_table, rank5_cmc_table]), axis=1)
column_parts = [
('Rank $1$ AUC', rank1_auc_table),
('Rank $1$ TPR', rank1_tpr_table),
('Pos. @ Rank $1$', rank1_cmc_table),
]
all_stats = pd.concat(ut.emap(
_bf_best, ut.take_column(column_parts, 1)), axis=1)
all_stats.index.name = None
colfmt = 'l|' + '|'.join(['rr'] * len(column_parts))
multi_header = (
[None] +
[(2, 'c|', name) for name in ut.take_column(column_parts, 0)[0:-1]] +
[(2, 'c', name) for name in ut.take_column(column_parts, 0)[-1:]]
)
from ibeis.scripts import _thesis_helpers
tabular = _thesis_helpers.Tabular(
all_stats, colfmt=colfmt, escape=False)
tabular.add_multicolumn_header(multi_header)
tabular.precision = 3
tex_text = tabular.as_tabular()
# HACKS
import re
num_pat = ut.named_field('num', '[0-9]*\.?[0-9]*')
tex_text = re.sub(re.escape('\\mathbf{$') + num_pat + re.escape('$}'),
'$\\mathbf{' + ut.bref_field('num') + '}$',
tex_text)
print(tex_text)
# tex_text = tex_text.replace('\\mathbf{$', '$\\mathbf{')
# tex_text = tex_text.replace('$}', '}$')
ut.write_to(join(VerifierExpt.base_dpath, 'agg-results-all.tex'), tex_text)
_ = ut.render_latex(tex_text, dpath=VerifierExpt.base_dpath,
fname='agg-results-all',
preamb_extra=['\\usepackage{makecell}'])
# ut.startfile(_)
if True:
# Tables
rank1_auc_table = rank_auc_tables[1]
rank1_tpr_table = rank_tpr_tables[1]
print('\nrank1_auc_table =\n{}'.format(to_string_monkey(rank1_auc_table, 'all')))
print('\nrank1_tpr_table =\n{}'.format(to_string_monkey(rank1_tpr_table, 'all')))
print('\nrank1_cmc_table =\n{}'.format(to_string_monkey(rank1_cmc_table, 'all')))
# Tables
rank1_auc_table = rank_auc_tables[1]
rank1_tpr_table = rank_tpr_tables[1]
# all_stats = pd.concat(ut.emap(_bf_best, [auc_table, rank1_cmc_table, rank5_cmc_table]), axis=1)
column_parts = [
('Rank $1$ AUC', rank1_auc_table),
# ('Rank $1$ TPR', rank1_tpr_table),
('Pos. @ Rank $1$', rank1_cmc_table),
]
all_stats = pd.concat(ut.emap(
_bf_best, ut.take_column(column_parts, 1)), axis=1)
all_stats.index.name = None
colfmt = 'l|' + '|'.join(['rr'] * len(column_parts))
multi_header = (
[None] +
[(2, 'c|', name) for name in ut.take_column(column_parts, 0)[0:-1]] +
[(2, 'c', name) for name in ut.take_column(column_parts, 0)[-1:]]
)
from ibeis.scripts import _thesis_helpers
tabular = _thesis_helpers.Tabular(
all_stats, colfmt=colfmt, escape=False)
tabular.add_multicolumn_header(multi_header)
tabular.precision = 3
tex_text = tabular.as_tabular()
# HACKS
import re
num_pat = ut.named_field('num', '[0-9]*\.?[0-9]*')
tex_text = re.sub(re.escape('\\mathbf{$') + num_pat + re.escape('$}'),
'$\\mathbf{' + ut.bref_field('num') + '}$',
tex_text)
print(tex_text)
print(tex_text)
# tex_text = tex_text.replace('\\mathbf{$', '$\\mathbf{')
# tex_text = tex_text.replace('$}', '}$')
ut.write_to(join(VerifierExpt.base_dpath, 'agg-results.tex'), tex_text)
_ = ut.render_latex(tex_text, dpath=VerifierExpt.base_dpath,
fname='agg-results',
preamb_extra=['\\usepackage{makecell}'])
_
# ut.startfile(_)
method = 2
if method == 2:
mpl.rcParams['text.usetex'] = True
mpl.rcParams['text.latex.unicode'] = True
# mpl.rcParams['axes.labelsize'] = 12
mpl.rcParams['legend.fontsize'] = 12
mpl.rcParams['xtick.color'] = 'k'
mpl.rcParams['ytick.color'] = 'k'
mpl.rcParams['axes.labelcolor'] = 'k'
# mpl.rcParams['text.color'] = 'k'
nums = [1, np.inf]
nums = [1]
for num in nums:
chunked_dbnames = list(ub.chunks(dbnames, 2))
for fnum, dbname_chunk in enumerate(chunked_dbnames, start=1):
fig = pt.figure(fnum=fnum) # NOQA
fig.clf()
ax = pt.gca()
for dbname in dbname_chunk:
data1 = rank_curves[num][CLF][dbname]
data2 = rank_curves[num][LNBNN][dbname]
data1['label'] = 'TPR=${tpr}$ {algo} {species}'.format(
algo=CLF,
tpr=data1['tpr@fpr=0_tex'], species=data1['species'])
data1['ls'] = '-'
data1['chunk_marker'] = '^'
data1['color'] = dbprops[dbname]['color']
data2['label'] = 'TPR=${tpr}$ {algo} {species}'.format(
algo=LNBNN,
tpr=data2['tpr@fpr=0_tex'], species=data2['species'])
data2['ls'] = '--'
data2['chunk_marker'] = 'v'
data2['color'] = dbprops[dbname]['color']
for d in [data1, data2]:
ax.plot(d['fpr'], d['tpr'], d['ls'],
color=d['color'], zorder=10)
for d in [data1, data2]:
ax.plot(0, d['tpr@fpr=0'], d['ls'],
marker=d['chunk_marker'],
markeredgecolor='k', markersize=8,
# fillstyle='none',
color=d['color'],
label=d['label'], zorder=100)
ax.set_xlabel('false positive rate')
ax.set_ylabel('true positive rate')
ax.set_ylim(0, 1)
ax.set_xlim(-.05, .5)
# ax.set_title('ROC with ranks $<= {}$'.format(num))
ax.legend(loc='lower right')
pt.adjust_subplots(top=.8, bottom=.2, left=.12, right=.9)
fig.set_size_inches([W * .7, H])
fname = 'agg_roc_rank_{}_chunk_{}_{}.png'.format(num, fnum,
task_key)
fig_fpath = join(str(VerifierExpt.base_dpath), fname)
vt.imwrite(fig_fpath, pt.render_figure_to_image(fig, dpi=DPI))
chunked_dbnames = list(ub.chunks(dbnames, 2))
for fnum, dbname_chunk in enumerate(chunked_dbnames, start=1):
fig = pt.figure(fnum=fnum) # NOQA
fig.clf()
ax = pt.gca()
for dbname in dbname_chunk:
data1 = rank_curves[num][CLF][dbname]
data2 = rank_curves[num][LNBNN][dbname]
data1['label'] = 'pos@rank1=${cmc0}$ {algo} {species}'.format(
algo=CLF,
cmc0=data1['cmc0_tex'], species=data1['species'])
data1['ls'] = '-'
data1['chunk_marker'] = '^'
data1['color'] = dbprops[dbname]['color']
data2['label'] = 'pos@rank1=${cmc0}$ {algo} {species}'.format(
algo=LNBNN, cmc0=data2['cmc0_tex'],
species=data2['species'])
data2['ls'] = '--'
data2['chunk_marker'] = 'v'
data2['color'] = dbprops[dbname]['color']
for d in [data1, data2]:
ax.plot(d['fpr'], d['tpr'], d['ls'],
color=d['color'])
for d in [data1, data2]:
ax.plot(d['cmc'], d['ls'],
# marker=d['chunk_marker'],
# markeredgecolor='k',
# markersize=8,
# fillstyle='none',
color=d['color'],
label=d['label'])
ax.set_xlabel('rank')
ax.set_ylabel('match probability')
ax.set_ylim(0, 1)
ax.set_xlim(1, 20)
ax.set_xticks([1, 5, 10, 15, 20])
# ax.set_title('ROC with ranks $<= {}$'.format(num))
ax.legend(loc='lower right')
pt.adjust_subplots(top=.8, bottom=.2, left=.12, right=.9)
fig.set_size_inches([W * .7, H])
fname = 'agg_cmc_chunk_{}_{}.png'.format(fnum, task_key)
fig_fpath = join(str(VerifierExpt.base_dpath), fname)
vt.imwrite(fig_fpath, pt.render_figure_to_image(fig, dpi=DPI))
if method == 1:
# Does going from rank 1 to rank inf generally improve deltas?
# -rank_tpr_tables[np.inf].diff(axis=1) - -rank_tpr_tables[1].diff(axis=1)
mpl.rcParams['text.usetex'] = True
mpl.rcParams['text.latex.unicode'] = True
# mpl.rcParams['axes.labelsize'] = 12
mpl.rcParams['legend.fontsize'] = 12
mpl.rcParams['xtick.color'] = 'k'
mpl.rcParams['ytick.color'] = 'k'
mpl.rcParams['axes.labelcolor'] = 'k'
# mpl.rcParams['text.color'] = 'k'
def method1_roc(roc_curves, algo, other):
ax = pt.gca()
for dbname in dbnames:
data = roc_curves[algo][dbname]
ax.plot(data['fpr'], data['tpr'], color=data['color'])
for dbname in dbnames:
data = roc_curves[algo][dbname]
other_data = roc_curves[other][dbname]
other_tpr = other_data['tpr@fpr=0']
species = data['species']
tpr = data['tpr@fpr=0']
tpr_text = '{:.3f}'.format(tpr)
if tpr >= other_tpr:
if mpl.rcParams['text.usetex']:
tpr_text = '\\mathbf{' + tpr_text + '}'
else:
tpr_text = tpr_text + '*'
label = 'TPR=${tpr}$ {species}'.format(
tpr=tpr_text, species=species)
ax.plot(0, data['tpr@fpr=0'], marker=data['marker'],
label=label, color=data['color'])
if algo:
algo = algo.rstrip() + ' '
algo = ''
ax.set_xlabel(algo + 'false positive rate')
ax.set_ylabel('true positive rate')
ax.set_ylim(0, 1)
ax.set_xlim(-.005, .5)
# ax.set_title('%s ROC for %s' % (target_class.title(), self.species))
ax.legend(loc='lower right')
pt.adjust_subplots(top=.8, bottom=.2, left=.12, right=.9)
fig.set_size_inches([W * .7, H])
nums = [1, np.inf]
# nums = [1]
for num in nums:
algos = {CLF, LNBNN}
for fnum, algo in enumerate(algos, start=1):
roc_curves = rank_curves[num]
other = next(iter(algos - {algo}))
fig = pt.figure(fnum=fnum) # NOQA
method1_roc(roc_curves, algo, other)
fname = 'agg_roc_rank_{}_{}_{}.png'.format(num, algo, task_key)
fig_fpath = join(str(VerifierExpt.base_dpath), fname)
vt.imwrite(fig_fpath, pt.render_figure_to_image(fig, dpi=DPI))
# -------------
mpl.rcParams['text.usetex'] = True
mpl.rcParams['text.latex.unicode'] = True
mpl.rcParams['xtick.color'] = 'k'
mpl.rcParams['ytick.color'] = 'k'
mpl.rcParams['axes.labelcolor'] = 'k'
mpl.rcParams['text.color'] = 'k'
def method1_cmc(cmc_curves):
ax = pt.gca()
color_cycle = mpl.rcParams['axes.prop_cycle'].by_key()['color']
markers = pt.distinct_markers(len(cmc_curves))
for data, marker, color in zip(cmc_curves.values(), markers, color_cycle):
species = data['species']
if mpl.rcParams['text.usetex']:
cmc0_text = data['cmc0_tex']
label = 'pos@rank1=${}$ {species}'.format(
cmc0_text, species=species)
else:
cmc0_text = data['cmc0_text']
label = 'pos@rank1={} {species}'.format(
cmc0_text, species=species)
ranks = np.arange(1, len(data['cmc']) + 1)
ax.plot(ranks, data['cmc'], marker=marker, color=color,
label=label)
ax.set_xlabel('rank')
ax.set_ylabel('match probability')
ax.set_ylim(0, 1)
ax.set_xlim(1, 20)
ax.set_xticks([1, 5, 10, 15, 20])
# ax.set_title('%s ROC for %s' % (target_class.title(), self.species))
ax.legend(loc='lower right')
pt.adjust_subplots(top=.8, bottom=.2, left=.12, right=.9)
fig.set_size_inches([W * .7, H])
fig = pt.figure(fnum=1) # NOQA
# num doesnt actually matter here
num = 1
cmc_curves = rank_curves[num][CLF]
method1_cmc(cmc_curves)
fname = 'agg_cmc_clf_{}.png'.format(task_key)
fig_fpath = join(str(VerifierExpt.base_dpath), fname)
vt.imwrite(fig_fpath, pt.render_figure_to_image(fig, dpi=DPI))
fig = pt.figure(fnum=2) # NOQA
cmc_curves = rank_curves[num][LNBNN]
method1_cmc(cmc_curves)
fname = 'agg_cmc_lnbnn_{}.png'.format(task_key)
fig_fpath = join(str(VerifierExpt.base_dpath), fname)
vt.imwrite(fig_fpath, pt.render_figure_to_image(fig, dpi=DPI))
if True:
# Agg metrics
agg_y_pred = []
agg_y_true = []
agg_sample_weight = []
agg_class_names = None
for dbname, results in all_results.items():
task_combo_res = results['task_combo_res']
res = task_combo_res[task_key][clf_key][data_key]
res.augment_if_needed()
y_true = res.y_test_enc
incmp_enc = ut.aslist(res.class_names).index(INCMP)
if sum(y_true == incmp_enc) < 500:
continue
# Find auto thresholds
print('-----')
print('dbname = {!r}'.format(dbname))
for k in range(res.y_test_bin.shape[1]):
class_k_truth = res.y_test_bin.T[k]
class_k_probs = res.clf_probs.T[k]
cfms_ovr = vt.ConfusionMetrics().fit(class_k_probs, class_k_truth)
# auc = sklearn.metrics.roc_auc_score(class_k_truth, class_k_probs)
state = res.class_names[k]
# for state, cfms_ovr in res.confusions_ovr():
if state == POSTV:
continue
tpr = cfms_ovr.get_metric_at_metric(
'tpr', 'fpr', 0, tiebreaker='minthresh')
# thresh = cfsm_scores_rank.get_metric_at_metric(
# 'thresh', 'fpr', 0, tiebreaker='minthresh')
print('state = {!r}'.format(state))
print('tpr = {:.3f}'.format(tpr))
print('+--')
print('-----')
# aggregate results
y_pred = res.clf_probs.argmax(axis=1)
agg_y_true.extend(y_true.tolist())
agg_y_pred.extend(y_pred.tolist())
agg_sample_weight.extend(res.sample_weight.tolist())
assert (agg_class_names is None or
agg_class_names == res.class_names), (
'classes are inconsistent')
agg_class_names = res.class_names
from ibeis.algo.verif import sklearn_utils
agg_report = sklearn_utils.classification_report2(
agg_y_true, agg_y_pred, agg_class_names, agg_sample_weight,
verbose=False)
metric_df = agg_report['metrics']
confusion_df = agg_report['confusion']
# multiclass_mcc = agg_report['mcc']
# df.loc['combined', 'MCC'] = multiclass_mcc
multiclass_mcc = agg_report['mcc']
metric_df.loc['combined', 'mcc'] = multiclass_mcc
print(metric_df)
print(confusion_df)
dpath = str(self.base_dpath)
confusion_fname = 'agg_confusion_{}'.format(task_key)
metrics_fname = 'agg_eval_metrics_{}'.format(task_key)
# df = self.task_confusion[task_key]
df = confusion_df.copy()
df = df.rename_axis(self.task_nice_lookup[task_key], 0)
df = df.rename_axis(self.task_nice_lookup[task_key], 1)
df.columns.name = None
df.index.name = 'Real'
colfmt = '|l|' + 'r' * (len(df) - 1) + '|l|'
tabular = Tabular(df, colfmt=colfmt, hline=True)
tabular.groupxs = [list(range(len(df) - 1)), [len(df) - 1]]
tabular.add_multicolumn_header([None, (3, 'c|', 'Predicted'), None])
latex_str = tabular.as_tabular()
sum_pred = df.index[-1]
sum_real = df.columns[-1]
latex_str = latex_str.replace(sum_pred, r'$\sum$ predicted')
latex_str = latex_str.replace(sum_real, r'$\sum$ real')
confusion_tex = ut.align(latex_str, '&', pos=None)
print(confusion_tex)
ut.render_latex(confusion_tex, dpath=self.base_dpath,
fname=confusion_fname)
df = metric_df
# df = self.task_metrics[task_key]
df = df.rename_axis(self.task_nice_lookup[task_key], 0)
df = df.rename_axis({'mcc': 'MCC'}, 1)
df = df.rename_axis({'combined': 'Combined'}, 1)
df = df.drop(['markedness', 'bookmaker', 'fpr'], axis=1)
df.index.name = None
df.columns.name = None
df['support'] = df['support'].astype(int)  # builtin int; np.int is deprecated
df.columns = ut.emap(upper_one, df.columns)
import re
tabular = Tabular(df, colfmt='numeric')
top, header, mid, bot = tabular.as_parts()
lines = mid[0].split('\n')
newmid = [lines[0:-1], lines[-1:]]
tabular.parts = (top, header, newmid, bot)
latex_str = tabular.as_tabular()
latex_str = re.sub(' -0.00 ', ' 0.00 ', latex_str)
metrics_tex = latex_str
print(metrics_tex)
confusion_tex = confusion_tex.replace('Incomparable', 'Incomp.')
confusion_tex = confusion_tex.replace('predicted', 'pred')
metrics_tex = metrics_tex.replace('Incomparable', 'Incomp.')
ut.write_to(join(dpath, confusion_fname + '.tex'), confusion_tex)
ut.write_to(join(dpath, metrics_fname + '.tex'), metrics_tex)
ut.render_latex(confusion_tex, dpath=dpath, fname=confusion_fname)
ut.render_latex(metrics_tex, dpath=dpath, fname=metrics_fname)
old_cmc = rank1_cmc_table[LNBNN]
new_cmc = rank1_cmc_table[CLF]
cmc_diff = new_cmc - old_cmc
cmc_change = cmc_diff / old_cmc
improved = cmc_diff > 0
print('{} / {} datasets saw CMC improvement'.format(sum(improved),
len(cmc_diff)))
print('CMC average absolute diff: {}'.format(cmc_diff.mean()))
print('CMC average percent change: {}'.format(cmc_change.mean()))
print('Average AUC:\n{}'.format(rank1_auc_table.mean(axis=0)))
print('Average TPR:\n{}'.format(rank1_tpr_table.mean(axis=0)))
old_tpr = rank1_tpr_table[LNBNN]
new_tpr = rank1_tpr_table[CLF]
tpr_diff = new_tpr - old_tpr
tpr_change = tpr_diff / old_tpr
improved = tpr_diff > 0
print('{} / {} datasets saw TPR improvement'.format(sum(improved),
len(tpr_diff)))
print('TPR average absolute diff: {}'.format(tpr_diff.mean()))
print('TPR average percent change: {}'.format(tpr_change.mean()))
@profile
def measure_dbstats(self):
"""
python -m ibeis VerifierExpt.measure dbstats GZ_Master1
python -m ibeis VerifierExpt.measure dbstats PZ_Master1
python -m ibeis VerifierExpt.measure dbstats MantaMatcher
python -m ibeis VerifierExpt.measure dbstats RotanTurtles
Ignore:
>>> from ibeis.scripts.postdoc import *
>>> #self = VerifierExpt('GZ_Master1')
>>> self = VerifierExpt('MantaMatcher')
"""
if self.ibs is None:
self._precollect()
ibs = self.ibs
# self.ibs.print_annot_stats(self.aids_pool)
# encattr = 'static_encounter'
encattr = 'encounter_text'
# encattr = 'aids'
annots = ibs.annots(self.aids_pool)
encounters = annots.group2(getattr(annots, encattr))
nids = ut.take_column(encounters.nids, 0)
nid_to_enc = ut.group_items(encounters, nids)
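# Names seen in only one encounter are singletons; names seen in several
# encounters are "resighted" and drive the per-name statistics below.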
single_encs = {nid: e for nid, e in nid_to_enc.items() if len(e) == 1}
multi_encs = {nid: self.ibs._annot_groups(e)
for nid, e in nid_to_enc.items() if len(e) > 1}
multi_annots = ibs.annots(ut.flatten(ut.flatten(multi_encs.values())))
single_annots = ibs.annots(ut.flatten(ut.flatten(single_encs.values())))
def annot_stats(annots, encattr):
encounters = annots.group2(getattr(annots, encattr))
nid_to_enc = ut.group_items(
encounters, ut.take_column(encounters.nids, 0))
nid_to_nenc = ut.map_vals(len, nid_to_enc)
n_enc_per_name = list(nid_to_nenc.values())
n_annot_per_enc = ut.lmap(len, encounters)
enc_deltas = []
for encs_ in nid_to_enc.values():
times = [np.mean(a.image_unixtimes_asfloat) for a in encs_]
for tup in ut.combinations(times, 2):
delta = max(tup) - min(tup)
enc_deltas.append(delta)
# pass
# delta = times.max() - times.min()
# enc_deltas.append(delta)
annot_info = ut.odict()
annot_info['n_names'] = len(nid_to_enc)
annot_info['n_annots'] = len(annots)
annot_info['n_encs'] = len(encounters)
annot_info['enc_time_deltas'] = ut.get_stats(enc_deltas)
annot_info['n_enc_per_name'] = ut.get_stats(n_enc_per_name)
annot_info['n_annot_per_enc'] = ut.get_stats(n_annot_per_enc)
# print(ut.repr4(annot_info, si=True, nl=1, precision=2))
return annot_info
enc_info = ut.odict()
enc_info['all'] = annot_stats(annots, encattr)
del enc_info['all']['enc_time_deltas']
enc_info['multi'] = annot_stats(multi_annots, encattr)
enc_info['single'] = annot_stats(single_annots, encattr)
del enc_info['single']['n_encs']
del enc_info['single']['n_enc_per_name']
del enc_info['single']['enc_time_deltas']
qual_info = ut.dict_hist(annots.quality_texts)
qual_info['None'] = qual_info.pop('UNKNOWN', 0)
qual_info['None'] += qual_info.pop(None, 0)
view_info = ut.dict_hist(annots.viewpoint_code)
view_info['None'] = view_info.pop('unknown', 0)
view_info['None'] += view_info.pop(None, 0)
info = ut.odict([])
info['species_nice'] = self.species_nice
info['enc'] = enc_info
info['qual'] = qual_info
info['view'] = view_info
print('Annotation Pool DBStats')
print(ut.repr4(info, si=True, nl=3, precision=2))
def _ave_str2(d):
try:
return ave_str(*ut.take(d, ['mean', 'std']))
except Exception:
return 0
outinfo = ut.odict([
('Database', info['species_nice']),
('Annots', enc_info['all']['n_annots']),
('Names (singleton)', enc_info['single']['n_names']),
('Names (resighted)', enc_info['multi']['n_names']),
('Enc per name (resighted)', _ave_str2(enc_info['multi']['n_enc_per_name'])),
('Annots per encounter', _ave_str2(enc_info['all']['n_annot_per_enc'])),
])
info['outinfo'] = outinfo
df = pd.DataFrame([outinfo])
df = df.set_index('Database')
df.index.name = None
df.index = ut.emap(upper_one, df.index)
tabular = Tabular(df, colfmt='numeric')
tabular.theadify = 16
enc_text = tabular.as_tabular()
print(enc_text)
# ut.render_latex(enc_text, dpath=self.dpath, fname='dbstats',
# preamb_extra=['\\usepackage{makecell}'])
# ut.startfile(_)
# expt_name = ut.get_stack_frame().f_code.co_name.replace('measure_', '')
expt_name = 'dbstats'
self.expt_results[expt_name] = info
ut.ensuredir(self.dpath)
ut.save_data(join(self.dpath, expt_name + '.pkl'), info)
return info
def measure_all(self):
r"""
CommandLine:
python -m ibeis VerifierExpt.measure all GZ_Master1,MantaMatcher,RotanTurtles,LF_ALL
python -m ibeis VerifierExpt.measure all GZ_Master1
Ignore:
from ibeis.scripts.postdoc import *
self = VerifierExpt('PZ_MTEST')
self.measure_all()
"""
self._setup()
pblm = self.pblm
expt_name = 'sample_info'
results = {
'graph': pblm.infr.graph,
'aid_pool': self.aids_pool,
'pblm_aids': pblm.infr.aids,
'encoded_labels2d': pblm.samples.encoded_2d(),
'subtasks': pblm.samples.subtasks,
'multihist': pblm.samples.make_histogram(),
}
self.expt_results[expt_name] = results
ut.save_data(join(str(self.dpath), expt_name + '.pkl'), results)
# importance = {
# task_key: pblm.feature_importance(task_key=task_key)
# for task_key in pblm.eval_task_keys
# }
task = pblm.samples['match_state']
scores = pblm.samples.simple_scores['score_lnbnn_1vM']
lnbnn_ranks = pblm.samples.simple_scores['rank_lnbnn_1vM']
y = task.indicator_df[task.default_class_name]
lnbnn_data = pd.concat([scores, lnbnn_ranks, y], axis=1)
results = {
'lnbnn_data': lnbnn_data,
'task_combo_res': self.pblm.task_combo_res,
# 'importance': importance,
'data_key': self.data_key,
'clf_key': self.clf_key,
}
expt_name = 'all'
self.expt_results[expt_name] = results
ut.save_data(join(str(self.dpath), expt_name + '.pkl'), results)
task_key = 'match_state'
self.measure_hard_cases(task_key)
self.measure_dbstats()
self.measure_rerank()
if ut.get_argflag('--draw'):
self.draw_all()
def draw_all(self):
r"""
CommandLine:
python -m ibeis VerifierExpt.draw_all --db PZ_MTEST
python -m ibeis VerifierExpt.draw_all --db PZ_PB_RF_TRAIN
python -m ibeis VerifierExpt.draw_all --db GZ_Master1
python -m ibeis VerifierExpt.draw_all --db PZ_Master1
Example:
>>> from ibeis.scripts.postdoc import *
>>> dbname = ut.get_argval('--db', default='PZ_MTEST')
>>> dbnames = ut.get_argval('--dbs', type_=list, default=[dbname])
>>> for dbname in dbnames:
>>> print('dbname = %r' % (dbname,))
>>> self = VerifierExpt(dbname)
>>> self.draw_all()
"""
results = self.ensure_results('all')
eval_task_keys = set(results['task_combo_res'].keys())
print('eval_task_keys = {!r}'.format(eval_task_keys))
task_key = 'match_state'
if ut.get_argflag('--cases'):
self.draw_hard_cases(task_key)
self.write_sample_info()
self.draw_roc(task_key)
self.draw_rerank()
self.write_metrics(task_key)
self.draw_class_score_hist()
self.draw_mcc_thresh(task_key)
def draw_roc(self, task_key='match_state'):
"""
python -m ibeis VerifierExpt.draw roc GZ_Master1 photobomb_state
python -m ibeis VerifierExpt.draw roc GZ_Master1 match_state
python -m ibeis VerifierExpt.draw roc PZ_MTEST
"""
mpl.rcParams.update(TMP_RC)
results = self.ensure_results('all')
data_key = results['data_key']
clf_key = results['clf_key']
task_combo_res = results['task_combo_res']
lnbnn_data = results['lnbnn_data']
task_key = 'match_state'
scores = lnbnn_data['score_lnbnn_1vM'].values
y = lnbnn_data[POSTV].values
# task_key = 'match_state'
target_class = POSTV
res = task_combo_res[task_key][clf_key][data_key]
cfsm_vsm = vt.ConfusionMetrics().fit(scores, y)
cfsm_clf = res.confusions(target_class)
roc_curves = [
{'label': LNBNN, 'fpr': cfsm_vsm.fpr, 'tpr': cfsm_vsm.tpr, 'auc': cfsm_vsm.auc},
{'label': CLF, 'fpr': cfsm_clf.fpr, 'tpr': cfsm_clf.tpr, 'auc': cfsm_clf.auc},
]
rank_clf_roc_curve = ut.ddict(list)
rank_lnbnn_roc_curve = ut.ddict(list)
roc_info_lines = []
# Check the ROC for only things in the top of the LNBNN ranked lists
if True:
rank_auc_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib
matplotlib.rcParams['lines.linewidth'] = 3
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
from scipy.cluster import hierarchy
from scipy.cluster.hierarchy import dendrogram, linkage, fcluster, set_link_color_palette
from scipy.spatial.distance import pdist
def compute_feature_importance(resistance_data, run_RF=False):
"""Predict resistance profiles from expression profiles and
raise important genes for the Random Forest regression.
When run_RF=False, the already filtered expression data is used."""
if run_RF:
expression = pd.read_excel('./PATH_TO_TABLES3/Table S3. Transcriptome data of evolved strains.xlsx', index_col=0, skiprows=1)
X = expression.T.iloc[:-4, :] # exclude the parent strains from the feature matrix X
y = resistance_data.reindex(X.index)
# Random Forest regression using hyperparameters defined by grid search
RF_reg = RandomForestRegressor(n_estimators=300, max_depth=18, random_state=42)
RF_reg.fit(X, y)
df = expression.iloc[np.argsort(RF_reg.feature_importances_)[::-1],:].T # sort genes based on its importance
df.to_pickle('./filtered_expression.pkl')
else:
df = pd.read_pickle('./data/filtered_expression.pkl')
return df
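# Example usage (a sketch; the data files referenced above are assumed
# to be present):
# expression_sorted = compute_feature_importance(resistance_data, run_RF=False)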
def create_dendrogram(df_pca, plot_figure=False):
"""Hierarchical clustering in the supervised PCA space."""
linked = linkage(pdist(df_pca[:-4],metric='euclidean'), 'ward', optimal_ordering=False)
leave_array = hierarchy.leaves_list(linked)
leave_array = leave_array[leave_array<192]
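# 192 is assumed to be the number of evolved strains; the last 4 rows of
# df_pca are the parent strains and are excluded from the linkage above.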
strain_names = df_pca.index.tolist()[:-4]
strain_h = [strain_names[leave_array[i]] for i in range(len(leave_array))]
hierarchy2 = fcluster(linked, t=15, criterion='maxclust') # cluster number is set to 15
if plot_figure:
color_list = ['#035ea2']*11
hierarchy.set_link_color_palette(color_list)
plt.figure(figsize=(40,3))
ax = plt.axes()
dn = dendrogram(linked,color_threshold=4.4835,labels=df_pca.index[:-4],#distance_sort='descending',
above_threshold_color='grey',leaf_rotation=90,get_leaves=True,distance_sort=True,
leaf_font_size=8
)
plt.yticks([])
for j,strain_ in enumerate(strain_h):
plt.text((j+0.15)/len(strain_h),-0.008,s=strain_,fontsize=13,
color ='black',
transform=ax.transAxes,verticalalignment='top',rotation=90,
weight='normal')
plt.axis('off')
#plt.savefig('FigS3_A.pdf', dpi=400, bbox_inches='tight')
plt.show()
return strain_h, hierarchy2
def plot_resistance(resistance_data, strain_h):
# hierarchical clustering of stresses
custom_cmap = sns.diverging_palette(252,18,s=99,l=52,sep=10,center='light',as_cmap=True)
# sorting of strains based on the hierarchical clustering in the supervised PCA space
stress_order = list(np.load('./data/stress_list_MAGE_order.npy')) # order of stresses based on mutant strain analysis
cl_resistance = resistance_data.reindex(strain_h).T
cl_resistance = cl_resistance.reindex(stress_order)
fig = plt.figure(figsize=(40,14))
ax = plt.axes()
mic_map = ax.imshow(cl_resistance, aspect=1.36, cmap=custom_cmap, clim=(-3,3))
cb = fig.colorbar(mic_map, extend='both', orientation='horizontal',ticks=[-3,3],aspect=3,
shrink=0.04,use_gridspec=False,anchor=(0.855,1.45))
cb.ax.set_xticklabels(cb.ax.get_xticklabels(), fontsize=25)
for j,stress_ in enumerate(stress_order):
plt.text(-0.005, (len(stress_order)-j-0.8)/len(stress_order), s=stress_,fontsize=13,
color ='black', transform=ax.transAxes, horizontalalignment='right', weight='normal')
plt.xlim(-0.5,191.6)
plt.axis('off')
for j,strain_ in enumerate(strain_h):
plt.text((j+0.15)/len(strain_h), -0.005, s=strain_, fontsize=10,
color ='black', transform=ax.transAxes,verticalalignment='top',rotation=90,
weight='normal')
#plt.savefig('Fig3_C.pdf', dpi=400, bbox_inches='tight')
plt.show()
resistance_data = | pd.read_csv('./data/resistance_norm.csv', index_col=0) | pandas.read_csv |
import threading
import time
import numpy as np
from brainflow.board_shim import BoardShim, BrainFlowInputParams, BoardIds
import pandas as pd
import tkinter as tk
from tkinter import filedialog
from queue import Queue
from threading import Thread
import streamlit as st
from streamlit.scriptrunner import add_script_run_ctx
class Client():
def __init__(self, datatype):
self.params = BrainFlowInputParams()
self.params.serial_port = 'com3'
self.params.board_id = 0
self.board = BoardShim(0, self.params)
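# board_id 0 corresponds to BoardIds.CYTON_BOARD in BrainFlow; 'com3'
# assumes the Cyton dongle is attached to that Windows serial port.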
self.datatype = datatype
self.file_path = None
self.fake_matrix = None
self.df = None
self.fake_df = None
self.times_to_go_over = 0
def collect_data(self, datatype):
if datatype == 'real':
start_real = Real(self)
start_real.collect_data_live()
else:
start_fake = Fake(self)
self.file_path = start_fake.choose_file()
self.fake_matrix = start_fake.read_file()
self.times_to_go_over = start_fake.passes_calc()
return self.fake_matrix, self.times_to_go_over
def real_data_collection(self):
start_real = Real(self)
m = start_real.read_data()
for i in range(10):
time.sleep(1)
d = start_real.read_data()
m = np.append(m, d, axis=1)
return m
class Real(Client):
def start_stream(self):
self.board.prepare_session()
self.board.start_stream()
def read_data(self):
data = self.board.get_board_data()
return data
def stop_stream(self):
self.board.stop_stream()
self.board.release_session()
class Fake(Client):
def choose_file(self):
root = tk.Tk()
root.withdraw()
self.file_path = filedialog.askopenfilename()
return self.file_path
def read_file(self):
self.df = pd.read_csv(self.file_path, sep=" ", header=None,
names=["samples", "channel 1", "channel 2", "channel 3",
"channel 4", "channel 5"])
return self.df
def passes_calc(self):
rows = len(self.df.index)
self.times_to_go_over = int(np.floor(rows / 256))
return self.times_to_go_over
def the_data(datatype, out_q):
if datatype == 'real':
start_real = Real(datatype)
start_real.start_stream()
counter = 0
time.sleep(1)
while counter < 600:
d = start_real.read_data()
A = pd.DataFrame(d)
A = A.transpose()
out_q.put(A)
counter += 1
if datatype == 'fake':
fake_matrix = Client(datatype)
the_fake_matrix, passes = fake_matrix.collect_data(datatype)
time.sleep(1)
for i in range(passes):
temp_df = the_fake_matrix[i * 256:i * 256 + 256]
out_q.put(temp_df)
def get_all_queue_result(queue):
result_list = []
while not queue.empty():
result_list.append(queue.get())
return result_list
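# Minimal wiring sketch (hypothetical; the actual Streamlit app wiring is
# not part of this file):
# q = Queue()
# producer = Thread(target=the_data, args=('fake', q))
# consumer = Thread(target=testing_queue, args=(q, pd.DataFrame()))
# add_script_run_ctx(producer); add_script_run_ctx(consumer)
# producer.start(); consumer.start()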
def testing_queue(in_q, all_data):
while True:
time.sleep(5)
temporary_df = pd.DataFrame()
for i in range(in_q.qsize()):
temporary_df = pd.concat([temporary_df, in_q.get()])
all_data = | pd.concat([all_data, temporary_df], axis=0) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import sqlite3 as sql
def eda_preprocessing():
#database= "AIAP/data/score.db"
database = "data/score.db"
connection =sql.connect(database)
query = '''SELECT * FROM score'''
df = | pd.read_sql_query(query,connection) | pandas.read_sql_query |
# coding: utf-8
# # Finding Donors for *CharityML*
# CharityML is a fictitious charity organization located in the heart of Silicon Valley that was established to provide financial support for people eager to learn machine learning. After nearly 32,000 letters were sent to people in the community, CharityML determined that every donation they received came from someone who was making more than $50,000 annually. To expand their potential donor base, CharityML has decided to send letters to residents of California, but to only those most likely to donate to the charity. With nearly 15 million working Californians, CharityML has brought you on board to help build an algorithm to best identify potential donors and reduce the overhead cost of sending mail. Your goal will be to evaluate and optimize several different supervised learners to determine which algorithm will provide the highest donation yield while also reducing the total number of letters being sent.
# ## Credits
#
# This notebook is a part of my learning path based on [Data Scientist Nanodegree Program](https://eu.udacity.com/course/data-scientist-nanodegree--nd025) enrollment.
# ## Getting Started
#
# In this project, you will employ several supervised algorithms of your choice to accurately model individuals' income using data collected from the 1994 U.S. Census. You will then choose the best candidate algorithm from preliminary results and further optimize this algorithm to best model the data. Your goal with this implementation is to construct a model that accurately predicts whether an individual makes more than $50,000. This sort of task can arise in a non-profit setting, where organizations survive on donations. Understanding an individual's income can help a non-profit better understand how large a donation to request, or whether or not they should reach out to begin with. While it can be difficult to determine an individual's general income bracket directly from public sources, we can (as we will see) infer this value from other publicly available features.
#
# The dataset for this project originates from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Census+Income). The dataset was donated by <NAME> and <NAME>, after being published in the article _"Scaling Up the Accuracy of Naive-Bayes Classifiers: A Decision-Tree Hybrid"_. You can find the article by <NAME> [online](https://www.aaai.org/Papers/KDD/1996/KDD96-033.pdf). The data we investigate here consists of small changes to the original dataset, such as removing the `'fnlwgt'` feature and records with missing or ill-formatted entries.
# ----
# ## Exploring the Data
# Run the code cell below to load necessary Python libraries and load the census data. Note that the last column from this dataset, `'income'`, will be our target label (whether an individual makes more than, or at most, $50,000 annually). All other columns are features about each individual in the census database.
# In[2]:
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from time import time
from IPython.display import display # Allows the use of display() for DataFrames
# Import supplementary visualization code visuals.py
import scripts.visuals as vs
# Pretty display for notebooks
get_ipython().run_line_magic('matplotlib', 'inline')
# Load the Census dataset
data = pd.read_csv("../../data/census.csv")
# Success - Display the first record
display(data.head(n=1))
# ### Implementation: Data Exploration
# A cursory investigation of the dataset will determine how many individuals fit into either group, and will tell us about the percentage of these individuals making more than \$50,000. In the code cell below, you will need to compute the following:
# - The total number of records, `'n_records'`
# - The number of individuals making more than \$50,000 annually, `'n_greater_50k'`.
# - The number of individuals making at most \$50,000 annually, `'n_at_most_50k'`.
# - The percentage of individuals making more than \$50,000 annually, `'greater_percent'`.
#
# ** HINT: ** You may need to look at the table above to understand how the `'income'` entries are formatted.
# In[2]:
# TODO: Total number of records
n_records = data.shape[0]
# TODO: Number of records where individual's income is more than $50,000
n_greater_50k = data[data["income"] == ">50K"].shape[0]
# TODO: Number of records where individual's income is at most $50,000
n_at_most_50k = data[data["income"] == "<=50K"].shape[0]
# TODO: Percentage of individuals whose income is more than $50,000
greater_percent = n_greater_50k/n_records * 100
# Print the results
print("Total number of records: {}".format(n_records))
print("Individuals making more than $50,000: {}".format(n_greater_50k))
print("Individuals making at most $50,000: {}".format(n_at_most_50k))
print("Percentage of individuals making more than $50,000: {}%".format(greater_percent))
# It is clear that the two classes (individuals with income > \$50k = 11208 and individuals with income at most \$50k = 34014) are imbalanced. Please check this [link](https://www.quora.com/In-classification-how-do-you-handle-an-unbalanced-training-set) to understand how to deal with imbalanced data.
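# As a side note, one common remedy for such an imbalance (not used in this notebook) is to weight the classes when fitting a model, e.g.:
# from sklearn.utils.class_weight import compute_class_weight
# weights = compute_class_weight(class_weight='balanced', classes=np.unique(data['income']), y=data['income'])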
# ** Featureset Exploration **
#
# * **age**: continuous.
# * **workclass**: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
# * **education**: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
# * **education-num**: continuous.
# * **marital-status**: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.
# * **occupation**: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.
# * **relationship**: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
# * **race**: Black, White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other.
# * **sex**: Female, Male.
# * **capital-gain**: continuous.
# * **capital-loss**: continuous.
# * **hours-per-week**: continuous.
# * **native-country**: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
# ----
# ## Preparing the Data
# Before data can be used as input for machine learning algorithms, it often must be cleaned, formatted, and restructured — this is typically known as **preprocessing**. Fortunately, for this dataset, there are no invalid or missing entries we must deal with, however, there are some qualities about certain features that must be adjusted. This preprocessing can help tremendously with the outcome and predictive power of nearly all learning algorithms.
# ### Transforming Skewed Continuous Features
# A dataset may sometimes contain at least one feature whose values tend to lie near a single number, but will also have a non-trivial number of vastly larger or smaller values than that single number. Algorithms can be sensitive to such distributions of values and can underperform if the range is not properly normalized. With the census dataset two features fit this description: '`capital-gain'` and `'capital-loss'`.
#
# Run the code cell below to plot a histogram of these two features. Note the range of the values present and how they are distributed.
# In[3]:
# Split the data into features and target label
income_raw = data['income']
features_raw = data.drop('income', axis = 1)
# Visualize skewed continuous features of original data
vs.distribution(data)
# For highly-skewed feature distributions such as `'capital-gain'` and `'capital-loss'`, it is common practice to apply a <a href="https://en.wikipedia.org/wiki/Data_transformation_(statistics)">logarithmic transformation</a> on the data so that the very large and very small values do not negatively affect the performance of a learning algorithm. Using a logarithmic transformation significantly reduces the range of values caused by outliers. Care must be taken when applying this transformation, however: The logarithm of `0` is undefined, so we must translate the values by a small amount above `0` to apply the logarithm successfully.
#
# Run the code cell below to perform a transformation on the data and visualize the results. Again, note the range of values and how they are distributed.
# In[4]:
# Log-transform the skewed features
skewed = ['capital-gain', 'capital-loss']
features_log_transformed = pd.DataFrame(data = features_raw)
features_log_transformed[skewed] = features_raw[skewed].apply(lambda x: np.log(x + 1))
# Visualize the new log distributions
vs.distribution(features_log_transformed, transformed = True)
# ### Normalizing Numerical Features
# In addition to performing transformations on features that are highly skewed, it is often good practice to perform some type of scaling on numerical features. Applying a scaling to the data does not change the shape of each feature's distribution (such as `'capital-gain'` or `'capital-loss'` above); however, normalization ensures that each feature is treated equally when applying supervised learners. Note that once scaling is applied, observing the data in its raw form will no longer have the same original meaning, as exampled below.
#
# Run the code cell below to normalize each numerical feature. We will use [`sklearn.preprocessing.MinMaxScaler`](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html) for this.
# In[5]:
# Import sklearn.preprocessing.MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
# Initialize a scaler, then apply it to the features
scaler = MinMaxScaler() # default=(0, 1)
numerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']
features_log_minmax_transform = pd.DataFrame(data = features_log_transformed)
features_log_minmax_transform[numerical] = scaler.fit_transform(features_log_transformed[numerical])
# Show an example of a record with scaling applied
display(features_log_minmax_transform.head(n = 5))
# ### Implementation: Data Preprocessing
#
# From the table in **Exploring the Data** above, we can see there are several features for each record that are non-numeric. Typically, learning algorithms expect input to be numeric, which requires that non-numeric features (called *categorical variables*) be converted. One popular way to convert categorical variables is by using the **one-hot encoding** scheme. One-hot encoding creates a _"dummy"_ variable for each possible category of each non-numeric feature. For example, assume `someFeature` has three possible entries: `A`, `B`, or `C`. We then encode this feature into `someFeature_A`, `someFeature_B` and `someFeature_C`.
#
# | | someFeature | | someFeature_A | someFeature_B | someFeature_C |
# | :-: | :-: | | :-: | :-: | :-: |
# | 0 | B | | 0 | 1 | 0 |
# | 1 | C | ----> one-hot encode ----> | 0 | 0 | 1 |
# | 2 | A | | 1 | 0 | 0 |
#
# Additionally, as with the non-numeric features, we need to convert the non-numeric target label, `'income'`, to numerical values for the learning algorithm to work. Since there are only two possible categories for this label ("<=50K" and ">50K"), we can avoid using one-hot encoding and simply encode these two categories as `0` and `1`, respectively. In the code cell below, you will need to implement the following:
# - Use [`pandas.get_dummies()`](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html?highlight=get_dummies#pandas.get_dummies) to perform one-hot encoding on the `'features_log_minmax_transform'` data.
# - Convert the target label `'income_raw'` to numerical entries.
# - Set records with "<=50K" to `0` and records with ">50K" to `1`.
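# (As a hint, the label encoding can be done with a simple mapping, e.g. `income_raw.map({'<=50K': 0, '>50K': 1})`.)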
# In[6]:
# TODO: One-hot encode the 'features_log_minmax_transform' data using pandas.get_dummies()
features_final = | pd.get_dummies(features_log_minmax_transform) | pandas.get_dummies |
import numpy as np
np.random.seed(1234)
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import torch.optim as optim
import argparse
import time
from sklearn.metrics import mean_absolute_error
from scipy.stats import pearsonr
from sklearn.metrics import f1_score, confusion_matrix, accuracy_score,\
classification_report, precision_recall_fscore_support
from sklearn.metrics import precision_score, recall_score, roc_auc_score
from model import BiModel, Model, MaskedNLLLoss
from collections import OrderedDict
import pandas as pd
#from model import AVECModel, MaskedMSELoss
from dataloader import CallHomeDataset
def get_train_valid_sampler(trainset, valid=0.1):
size = len(trainset)
idx = range(size)
split = int(valid*size)
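# Hold out the first `valid` fraction of indices for validation; the remainder is used for training.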
return SubsetRandomSampler(idx[split:]), SubsetRandomSampler(idx[:split])
def get_CallHome_loaders(path, stance, batch_size=32, valid=None, num_workers=0, pin_memory=False, acproject=False, acfset="eGeMAPSv01a"):
devset = CallHomeDataset(path=path, stance=stance, part="dev", acproject=acproject, acfset=acfset)
testset = CallHomeDataset(path=path, stance=stance, part="eval", acproject=acproject, acfset=acfset)
trainset = CallHomeDataset(path=path, stance=stance, part="train", acproject=acproject, acfset=acfset)
train_loader = DataLoader(trainset,
batch_size=batch_size,
#sampler=train_sampler,
shuffle=True,
collate_fn=trainset.collate_fn,
num_workers=num_workers,
pin_memory=pin_memory)
valid_loader = DataLoader(devset,
batch_size=batch_size,
#sampler=valid_sampler,
shuffle=True,
collate_fn=devset.collate_fn,
num_workers=num_workers,
pin_memory=pin_memory)
#testset = AVECDataset(path=path, train=False)
test_loader = DataLoader(testset,
batch_size=batch_size,
collate_fn=testset.collate_fn,
num_workers=num_workers,
pin_memory=pin_memory)
return train_loader, valid_loader, test_loader
def get_AVEC_loaders(path, batch_size=32, valid=None, num_workers=0, pin_memory=False):
trainset = AVECDataset(path=path)
train_sampler, valid_sampler = get_train_valid_sampler(trainset, valid)
train_loader = DataLoader(trainset,
batch_size=batch_size,
sampler=train_sampler,
collate_fn=trainset.collate_fn,
num_workers=num_workers,
pin_memory=pin_memory)
valid_loader = DataLoader(trainset,
batch_size=batch_size,
sampler=valid_sampler,
collate_fn=trainset.collate_fn,
num_workers=num_workers,
pin_memory=pin_memory)
testset = AVECDataset(path=path, train=False)
test_loader = DataLoader(testset,
batch_size=batch_size,
collate_fn=testset.collate_fn,
num_workers=num_workers,
pin_memory=pin_memory)
return train_loader, valid_loader, test_loader
def train_or_eval_model(model, loss_function, dataloader, epoch, optimizer=None, train=False):
losses = []
preds = []
probs = []
labels = []
masks = []
alphas, alphas_f, alphas_b, metas = [], [], [], []
assert not train or optimizer!=None
if train:
model.train()
else:
model.eval()
for data in dataloader:
if train:
optimizer.zero_grad()
# import ipdb;ipdb.set_trace()
## get batch features
textf, acouf, qmask, umask, label, metadf =\
[d.cuda() for d in data] if cuda else data
# [d.cuda() for d in data[:-1]] if cuda else data[:-1]
## qmask is the speakers
## umask is the sequence mask
#log_prob = model(torch.cat((textf,acouf,visuf),dim=-1), qmask,umask) # seq_len, batch, n_classes
log_prob, alpha, alpha_f, alpha_b = model(textf, qmask, umask) # seq_len, batch, n_classes
#print("log prob", log_prob.shape)
lp_ = log_prob.transpose(0,1).contiguous().view(-1,log_prob.size()[2]) # batch*seq_len, n_classes
## predictions are now batch first, but mask is seq first
#print("lp_", lp_.shape)
labels_ = label.view(-1) # batch*seq_len
currmask = umask.transpose(0,1).contiguous() #.view(-1)
#loss = loss_function(lp_, labels_, umask)
loss = loss_function(lp_, labels_, currmask)
pred_ = torch.argmax(lp_,1) # batch*seq_len
preds.append(pred_.data.cpu().numpy())
labels.append(labels_.data.cpu().numpy())
currmask = currmask.view(-1)
#print("currmask", currmask.shape)
#masks.append(umask.view(-1).cpu().numpy())
masks.append(currmask.cpu().numpy())
#print(masks)
#raise SystemExit
probs.append(lp_.data.cpu().numpy())
#print(metadf.shape)
metas.append(metadf.view(-1))
#print(metas[-1].shape)
losses.append(loss.item()*masks[-1].sum())
if train:
loss.backward()
if args.tensorboard:
for param in model.named_parameters():
writer.add_histogram(param[0], param[1].grad, epoch)
optimizer.step()
else:
alphas += alpha
alphas_f += alpha_f
alphas_b += alpha_b
if preds!=[]:
preds = np.concatenate(preds)
probs = np.concatenate(probs)
labels = np.concatenate(labels)
masks = np.concatenate(masks)
metas = np.concatenate(metas)
# print(probs.shape)
# print(metas.shape)
else:
return float('nan'), float('nan'), [], [], [], [], float('nan'), [], []
## Are these the wrong masks?
avg_loss = round(np.sum(losses)/np.sum(masks),4)
avg_accuracy = round(accuracy_score(labels,preds,sample_weight=masks)*100,2)
avg_fscore = round(f1_score(labels,preds,sample_weight=masks,average='weighted')*100,2)
return avg_loss, avg_accuracy, labels, preds, probs, masks, avg_fscore, metas, [alphas, alphas_f, alphas_b]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', action='store_true', default=False,
help='does not use GPU')
parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',
help='learning rate')
parser.add_argument('--l2', type=float, default=0.0001, metavar='L2',
help='L2 regularization weight')
parser.add_argument('--rec-dropout', type=float, default=0.0,
metavar='rec_dropout', help='rec_dropout rate')
parser.add_argument('--dropout', type=float, default=0.0, metavar='dropout',
help='dropout rate')
parser.add_argument('--batch-size', type=int, default=128, metavar='BS',
help='batch size')
parser.add_argument('--epochs', type=int, default=100, metavar='E',
help='number of epochs')
parser.add_argument('--active-listener', action='store_true', default=False,
help='active listener')
parser.add_argument('--attention', default='simple', help='Attention type')
parser.add_argument('--class-weight', action='store_true', default=True,
help='class weight')
parser.add_argument('--tensorboard', action='store_true', default=False,
help='Enables tensorboard log')
parser.add_argument('--attribute', type=str , default="Positive", help='CallHome Stance')
parser.add_argument('--encdir', default='/afs/inf.ed.ac.uk/user/c/clai/tunguska/stance2019/h5/encodings/', help='embedding directory')
parser.add_argument('--acproject', action='store_true', default=False,
help='use acoustic encoding with projection layer')
parser.add_argument('--acfset', type=str , default="eGeMAPSv01a", help='base acoustic feature set')
args = parser.parse_args()
print(args)
args.cuda = torch.cuda.is_available() and not args.no_cuda
if args.cuda:
print('Running on GPU')
else:
print('Running on CPU')
if args.tensorboard:
from tensorboardX import SummaryWriter
writer = SummaryWriter()
batch_size = args.batch_size
n_classes = 2
cuda = args.cuda
n_epochs = args.epochs
D_m = 100
D_g = 100
D_p = 100
D_e = 100
D_h = 100
D_a = 100 # concat attention
model = BiModel(D_m, D_g, D_p, D_e, D_h,
n_classes=n_classes,
listener_state=args.active_listener,
context_attention=args.attention,
dropout_rec=args.rec_dropout,
dropout=args.dropout)
if cuda:
model.cuda()
loss_weights = torch.FloatTensor([
1.,
1.
])
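# Per-class weights for the masked NLL loss (both stance classes weighted equally here; only used when --class-weight is set).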
if args.class_weight:
loss_function = MaskedNLLLoss(loss_weights.cuda() if cuda else loss_weights)
else:
loss_function = MaskedNLLLoss()
optimizer = optim.Adam(model.parameters(),
lr=args.lr,
weight_decay=args.l2)
#print(args.attribute)
## basically we need a new data loader
train_loader, valid_loader, test_loader =\
get_CallHome_loaders(args.encdir, args.attribute,
valid=0.0,
batch_size=batch_size,
num_workers=2, acproject=args.acproject, acfset=args.acfset)
best_loss, best_label, best_pred, best_mask = None, None, None, None
## This is doing early stopping on test, not dev?
## Also there's no model saving etc
## Check that it's using eval mode?
patience = 10
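# Early stopping: patience is reset to 10 whenever validation loss improves and
# decremented otherwise; training stops once it reaches 0.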
for e in range(n_epochs):
start_time = time.time()
if patience == 0:
print("NO MORE PATIENCE", patience)
break
print("EPOCH:", e)
train_loss, train_acc, _,_,_,_,train_fscore, _, _= train_or_eval_model(model, loss_function,
train_loader, e, optimizer, True)
valid_loss, valid_acc, valid_label, valid_pred , valid_probs, valid_mask, val_fscore, val_metas, val_attentions = train_or_eval_model(model, loss_function, valid_loader, e)
test_loss, test_acc, test_label, test_pred, test_probs, test_mask, test_fscore, test_metas, attentions = train_or_eval_model(model, loss_function, test_loader, e)
if best_loss is None or best_valid_loss > valid_loss:
best_loss, best_label, best_pred, best_prob, best_mask, best_metas, best_attn =\
test_loss, test_label, test_pred, test_probs, test_mask, test_metas, attentions
best_valid_loss, best_valid_label, best_valid_pred, best_valid_prob, best_valid_mask, best_valid_metas, best_valid_attn =\
valid_loss, valid_label, valid_pred, valid_probs, valid_mask, val_metas, val_attentions
print('epoch {} train_loss {} train_acc {} train_fscore {} valid_loss {} valid_acc {} val_fscore {} test_loss {} test_acc {} test_fscore {} time {}'.\
format(e, train_loss, train_acc, train_fscore, valid_loss, valid_acc, val_fscore,\
test_loss, test_acc, test_fscore, round(time.time()-start_time,2)))
patience = 10
else:
patience -= 1
print("EPOCH/PATIENCE:", e, patience)
if args.tensorboard:
writer.add_scalar('valid: accuracy/loss',valid_acc/valid_loss,e)
writer.add_scalar('test: accuracy/loss',test_acc/test_loss,e)
writer.add_scalar('train: accuracy/loss',train_acc/train_loss,e)
if args.tensorboard:
writer.close()
print('Dev performance..')
print('Loss {} accuracy {}'.format(best_valid_loss,
round(accuracy_score(best_valid_label,best_valid_pred,sample_weight=best_valid_mask)*100,2)))
print(classification_report(best_valid_label,best_valid_pred,sample_weight=best_valid_mask,digits=4))
print(confusion_matrix(best_valid_label,best_valid_pred,sample_weight=best_valid_mask))
print('Test performance..')
print('Loss {} accuracy {}'.format(best_loss,
round(accuracy_score(best_label,best_pred,sample_weight=best_mask)*100,2)))
print(classification_report(best_label,best_pred,sample_weight=best_mask,digits=4))
print(confusion_matrix(best_label,best_pred,sample_weight=best_mask))
print(np.exp(best_valid_prob))
modname = "BiModel"
Y_dev = best_valid_label
devpred = best_valid_pred
devprob = np.exp(best_valid_prob[:,1])
print(Y_dev.shape, devpred.shape, devprob.shape)
print(f1_score(Y_dev, devpred, sample_weight=best_valid_mask, pos_label=0))
dev_metrics = pd.DataFrame(OrderedDict(
[('classifier','DialogueRNN'), ('p1','turntrans_turnacoustic'), ('p2','X'), ('stance',args.attribute),
('f1',f1_score(Y_dev, devpred, sample_weight=best_valid_mask)),
('weighted_f1',f1_score(Y_dev, devpred, average='weighted', sample_weight=best_valid_mask)),
('precision',precision_score(Y_dev, devpred, sample_weight=best_valid_mask)),
('recall',recall_score(Y_dev, devpred, sample_weight=best_valid_mask)),
('f1_0',f1_score(Y_dev, devpred, sample_weight=best_valid_mask, pos_label=0)),
('weighted_f1_0',f1_score(Y_dev, devpred, average='weighted', sample_weight=best_valid_mask, pos_label=0)),
('precision_0',precision_score(Y_dev, devpred, sample_weight=best_valid_mask, pos_label=0)),
('recall_0',recall_score(Y_dev, devpred, sample_weight=best_valid_mask, pos_label=0)),
('accuracy',accuracy_score(Y_dev, devpred, sample_weight=best_valid_mask)),
('auroc',roc_auc_score(Y_dev, devprob, sample_weight=best_valid_mask))]
), index=[modname])
Y_eval = best_label
evalpred = best_pred
evalprob = np.exp(best_prob[:,1])
eval_metrics = pd.DataFrame(OrderedDict(
[('classifier','DialogueRNN'), ('p1','turntrans_turnacoustic'), ('p2','X'), ('stance',args.attribute),
('f1',f1_score(Y_eval, evalpred, sample_weight=best_mask)),
('weighted_f1',f1_score(Y_eval, evalpred, average='weighted', sample_weight=best_mask)),
('precision',precision_score(Y_eval, evalpred, sample_weight=best_mask)),
('recall',recall_score(Y_eval, evalpred, sample_weight=best_mask)),
('f1_0',f1_score(Y_eval, evalpred, sample_weight=best_mask, pos_label=0)),
('weighted_f1_0',f1_score(Y_eval, evalpred, average='weighted', sample_weight=best_mask, pos_label=0)),
('precision_0',precision_score(Y_eval, evalpred, sample_weight=best_mask, pos_label=0)),
('recall_0',recall_score(Y_eval, evalpred, sample_weight=best_mask, pos_label=0)),
('accuracy',accuracy_score(Y_eval, evalpred, sample_weight=best_mask)),
('auroc',roc_auc_score(Y_eval, evalprob, sample_weight=best_mask))]
), index=[modname])
alstr = "simple"
if args.active_listener:
alstr = "active"
acstr = "noproj"
if args.acproject:
acstr = "acproj"
acstr = acstr + "-" + args.acfset + "-" + str(batch_size)
outdir = "./callhome"
resfile = outdir + "/DialogueRNN." + args.attribute + "." + alstr + "." + acstr + ".metrics.txt"
print(resfile)
print(dev_metrics)
dev_metrics.to_csv(resfile)
resfile = resfile + ".eval"
print(resfile)
print(eval_metrics)
eval_metrics.to_csv(resfile)
predfile = "./preds/BiModel.turntrans_turnacoustic." + args.attribute + "." + alstr + "." + acstr + ".pred.dev.txt"
print(predfile)
preddf0 = pd.DataFrame({'classifier':'BiModel', 'p1':'X', 'xid':best_valid_metas, 'stance':args.attribute, 'pred':best_valid_pred, 'label':best_valid_label, 'mask':best_valid_mask})
devprobdf = pd.DataFrame(np.exp(best_valid_prob), columns=["Pr0", "Pr1"])
preddf = pd.concat([preddf0, devprobdf], axis=1)
preddf.to_csv(predfile, index=False, sep="\t")
predfile = predfile.replace(".dev.", ".eval.") #"./preds/BiModel.turntrans_turnacoustic." + args.attribute + ".pred.eval.txt"
print(predfile)
preddf0 = | pd.DataFrame({'classifier':'BiModel', 'p1':'X', 'xid':best_metas, 'stance':args.attribute, 'pred':best_pred, 'label':best_label, 'mask':best_mask}) | pandas.DataFrame |
# ****************************************************************************
# @man_helpers.py
#
# @copyright 2022 Elektronische Fahrwerksysteme GmbH and Audi AG. All rights reserved.
#
# @license Apache v2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ****************************************************************************
"""
helper functions for maneuver abstraction.
"""
import pyproj
import simplekml
import math
import pandas as pd
import numpy as np
import os
from typing import Union
from osc_generator.tools.coord_calculations import get_proj_from_open_drive
from osc_generator.tools import rulebased, utils
def convert_maneuvers_to_kml(lat: pd.DataFrame, lon: pd.DataFrame, maneuvers: pd.DataFrame, ego: bool) -> simplekml.Kml:
"""
Convert maneuvers from a pandas DataFrame to KML
Args:
lat: Latitude
lon: Longitude
maneuvers: Maneuvers of the vehicle
ego: True for the ego vehicle
Returns:
object (simplekml.Kml): Kml file containing the maneuvers
"""
if not isinstance(lat, pd.DataFrame):
raise TypeError("input must be a pd.DataFrame")
if not isinstance(lon, pd.DataFrame):
raise TypeError("input must be a pd.DataFrame")
if not isinstance(maneuvers, pd.DataFrame):
raise TypeError("input must be a pd.DataFrame")
if not isinstance(ego, bool):
raise TypeError("input must be a bool")
kml = simplekml.Kml()
overview_doc = kml.newdocument(name='Overview')
old_lon = 0
old_lat = 0
accelerate = maneuvers['FM_EGO_accelerate']
accelerate_doc = kml.newdocument(name='accelerate')
velocity = maneuvers['FM_EGO_keep_velocity']
velocity_doc = kml.newdocument(name='velocity')
standstill = maneuvers['FM_EGO_standstill']
standstill_doc = kml.newdocument(name='standstill')
decelerate = maneuvers['FM_EGO_decelerate']
decelerate_doc = kml.newdocument(name='decelerate')
lane_change_left = None
lane_change_left_doc = None
lane_change_right = None
lane_change_right_doc = None
if ego is True:
lane_change_left = maneuvers['FM_INF_lane_change_left']
lane_change_left_doc = kml.newdocument(name='lane_change_left')
lane_change_right = maneuvers['FM_INF_lane_change_right']
lane_change_right_doc = kml.newdocument(name='lane_change_right')
for row in range(lat.shape[0]):
if not (pd.isna(lat.loc[row])) or not (pd.isna(lon.loc[row])):
if old_lat != 0 and old_lon != 0:
pathway = overview_doc.newlinestring()
pathway.coords = [(old_lon, old_lat), (lon.loc[row], lat.loc[row])]
pathway.style.linestyle.width = 8
pathway.style.linestyle.color = simplekml.Color.rgb(0, 0, 0)
if accelerate[row] == 1:
pathway = accelerate_doc.newlinestring()
pathway.coords = [(old_lon, old_lat), (lon.loc[row], lat.loc[row])]
pathway.style.linestyle.width = 4
pathway.style.linestyle.color = simplekml.Color.rgb(0, 255, 251)
if velocity[row] == 1:
pathway = velocity_doc.newlinestring()
pathway.coords = [(old_lon, old_lat), (lon.loc[row], lat.loc[row])]
pathway.style.linestyle.width = 4
pathway.style.linestyle.color = simplekml.Color.rgb(0, 143, 255)
if standstill[row] == 1:
pathway = standstill_doc.newlinestring()
pathway.coords = [(old_lon, old_lat), (lon.loc[row], lat.loc[row])]
pathway.style.linestyle.width = 4
pathway.style.linestyle.color = simplekml.Color.rgb(0, 0, 0)
if decelerate[row] == 1:
pathway = decelerate_doc.newlinestring()
pathway.coords = [(old_lon, old_lat), (lon.loc[row], lat.loc[row])]
pathway.style.linestyle.width = 4
pathway.style.linestyle.color = simplekml.Color.rgb(0, 23, 255)
if ego is True:
if lane_change_left[row] == 1:
pathway = lane_change_left_doc.newlinestring()
pathway.coords = [(old_lon, old_lat), (lon.loc[row], lat.loc[row])]
pathway.style.linestyle.width = 4
pathway.style.linestyle.color = simplekml.Color.rgb(255, 169, 59)
if lane_change_right[row] == 1:
pathway = lane_change_right_doc.newlinestring()
pathway.coords = [(old_lon, old_lat), (lon.loc[row], lat.loc[row])]
pathway.style.linestyle.width = 4
pathway.style.linestyle.color = simplekml.Color.rgb(209, 126, 20)
old_lon = lon.loc[row]
old_lat = lat.loc[row]
return kml
def create_speed_model(df_maneuvers: pd.DataFrame, init_speed: float) -> Union[list, np.ndarray]:
"""
Helper function. Extracts speed information from maneuvers.
Args:
df_maneuvers: Maneuvers array
init_speed: Initial speed
Returns:
object (Union[list, np.ndarray]): Modelled speed
"""
if not isinstance(df_maneuvers, pd.DataFrame):
raise TypeError("input must be a pd.DataFrame")
if not isinstance(init_speed, float):
raise TypeError("input must be a float")
speed = []
man_type = [] # 1 for acceleration, -1 for deceleration, 0 for standstill
num_rows, num_cols = df_maneuvers.shape
for i in range(num_rows):
if df_maneuvers.iloc[i].iloc[2] == 'FM_EGO_decelerate':
man_type.append(-1)
elif df_maneuvers.iloc[i].iloc[2] == 'FM_EGO_keep_velocity':
if i == 0:
start_speed = float(init_speed / 3.6)
else:
start_speed = float(df_maneuvers.iloc[i - 1].iloc[5])
if float(df_maneuvers.iloc[i].iloc[5]) >= start_speed:
man_type.append(1)
else:
man_type.append(-1)
elif df_maneuvers.iloc[i].iloc[2] == 'FM_EGO_accelerate':
man_type.append(1)
elif df_maneuvers.iloc[i].iloc[2] == 'FM_EGO_standstill':
man_type.append(0)
accel_res = []
maneuver_len = []
for i in range(num_rows):
accel_res.append(float(df_maneuvers.iloc[i].iloc[6]) * man_type[i])
if i > 0:
maneuver_start = int(df_maneuvers.iloc[i].iloc[0])
maneuver_end = int(df_maneuvers.iloc[i - 1].iloc[1])
if maneuver_end >= maneuver_start:
maneuver_len.append(int(df_maneuvers.iloc[i].iloc[1]) - maneuver_end)
else:
maneuver_len.append(int(df_maneuvers.iloc[i].iloc[1]) + 1 - int(df_maneuvers.iloc[i].iloc[0]))
else:
maneuver_len.append(int(df_maneuvers.iloc[i].iloc[1]) + 1 - int(df_maneuvers.iloc[i].iloc[0]))
start_time = int(df_maneuvers.iloc[0].iloc[0])
delta_t = 0.1
acc_full = []
for i in range(len(maneuver_len)):
for k in range(int(maneuver_len[i])):
acc_full.append(accel_res[i])
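# Forward-integrate the piecewise-constant accelerations at 10 Hz (delta_t = 0.1 s);
# speeds are kept in km/h, hence the factor 3.6.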
for i in range(start_time, start_time + len(acc_full)):
if i == start_time:
speed.append(init_speed)
else:
if start_time == 0:
sub = 1
else:
sub = start_time + 1
speed.append(speed[i - sub] + acc_full[i - sub] * delta_t * 3.6)
speed_np = np.array(speed)
return speed_np
def calc_opt_acc_thresh(df: pd.DataFrame, df_lanes: pd.DataFrame, opendrive_path: str, use_folder: bool,
dir_name: str) -> np.ndarray:
"""
Used to get optimal acceleration threshold to label maneuvers.
Args:
df: Main processed dataframe
df_lanes: Dataframe which contains absolute positions of lanes
opendrive_path: Path to opendrive file
use_folder: Option to create folder structure
dir_name: Name of the folder
Returns:
object (np.ndarray): Optimal acceleration threshold
"""
if not isinstance(df, pd.DataFrame):
raise TypeError("input must be a pd.DataFrame")
if not isinstance(df_lanes, pd.DataFrame):
raise TypeError("input must be a pd.DataFrame")
if not isinstance(opendrive_path, str):
raise TypeError("input must be a str")
if not isinstance(use_folder, bool):
raise TypeError("input must be a bool")
if not isinstance(dir_name, str):
raise TypeError("input must be a str")
movobj_grps_coord = utils.find_vars('lat_|lon_|speed_|class', df.columns, reshape=True)
# Labeling
lane_change_left_array, lane_change_right_array = rulebased.create_lateral_maneuver_vectors(df_lanes, df['lat'], df['long'])
# Array can be extended with more values for acceleration thresholds
acc_thres = np.array([0.05, 0.1, 0.15, 0.2, 0.25, 0.3])
quality_array = np.zeros((len(movobj_grps_coord) + 1, len(acc_thres)))
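# quality_array[object, threshold] will hold the RMSE between measured and modelled speed (row 0 is the ego vehicle).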
for x in range(len(acc_thres)):
curr_thres = acc_thres[x]
print('current acceleration threshold: ' + str(curr_thres))
speed = df['speed']
accelerate_array, \
start_array, \
keep_velocity_array, \
standstill_array, \
decelerate_array, \
stop_array, \
reversing_array = rulebased.create_longitudinal_maneuver_vectors(
speed, acceleration_definition_threshold=curr_thres)
# Create df with maneuver info
df_maneuvers = pd.DataFrame(data=None)
df_maneuvers['FM_INF_lane_change_left'] = lane_change_left_array
df_maneuvers['FM_INF_lane_change_right'] = lane_change_right_array
df_maneuvers['FM_EGO_accelerate'] = accelerate_array
df_maneuvers['FM_EGO_start'] = start_array
df_maneuvers['FM_EGO_keep_velocity'] = keep_velocity_array
df_maneuvers['FM_EGO_standstill'] = standstill_array
df_maneuvers['FM_EGO_decelerate'] = decelerate_array
df_maneuvers['FM_EGO_stop'] = stop_array
df_maneuvers['FM_EGO_reversing'] = reversing_array
df_maneuvers_objects = {}
for i in range(len(movobj_grps_coord)):
speed = df[movobj_grps_coord[i][2]]
accelerate_array, \
start_array, \
keep_velocity_array, \
standstill_array, \
decelerate_array, \
stop_array, \
reversing_array = rulebased.create_longitudinal_maneuver_vectors(
speed, acceleration_definition_threshold=acc_thres[x])
df_maneuvers_objects[i] = pd.DataFrame(data=None)
df_maneuvers_objects[i]['FM_EGO_accelerate'] = accelerate_array
df_maneuvers_objects[i]['FM_EGO_start'] = start_array
df_maneuvers_objects[i]['FM_EGO_keep_velocity'] = keep_velocity_array
df_maneuvers_objects[i]['FM_EGO_standstill'] = standstill_array
df_maneuvers_objects[i]['FM_EGO_decelerate'] = decelerate_array
df_maneuvers_objects[i]['FM_EGO_stop'] = stop_array
df_maneuvers_objects[i]['FM_EGO_reversing'] = reversing_array
left_lane_change_array, right_lane_change_array = rulebased.create_lateral_maneuver_vectors(df_lanes, df[
movobj_grps_coord[i][0]], df[movobj_grps_coord[i][1]]) # lat lon
df_maneuvers_objects[i]['FM_INF_lane_change_left'] = left_lane_change_array
df_maneuvers_objects[i]['FM_INF_lane_change_right'] = right_lane_change_array
# Init
# Get projection coordinates of respective open drive from open drive file
proj_in = pyproj.Proj('EPSG:4326')
proj_out = get_proj_from_open_drive(open_drive_path=opendrive_path)
columns = ['lat', 'long', 'speed', 'heading']
# Get start position, speed and heading of ego
ego = []
lon, lat = (pyproj.Transformer.from_crs(proj_in.crs, proj_out.crs, always_xy=True)).transform(
df[columns[1]][0],
df[columns[0]][0])
ego.append(lon)
ego.append(lat)
ego.append(df[columns[2]][0] / 3.6) # Speed
ego.append(utils.convert_heading(df[columns[3]][0])) # Heading
# Get start position, speed and heading of other objects
objects = {}
for i in range(len(movobj_grps_coord)):
lon, lat = (pyproj.Transformer.from_crs(proj_in.crs, proj_out.crs, always_xy=True)).transform(
df[movobj_grps_coord[i][1]][0],
df[movobj_grps_coord[i][0]][0])
obj = list()
obj.append(lon) # Lon
obj.append(lat) # Lat
obj.append(df[movobj_grps_coord[i][2]][0] / 3.6) # Speed
temp_heading = utils.calc_heading_from_two_geo_positions(df[movobj_grps_coord[i][0]][0], df[movobj_grps_coord[i][1]][0],
df[movobj_grps_coord[i][0]][1], df[movobj_grps_coord[i][1]][1])
obj.append(utils.convert_heading(temp_heading))
objects[i] = obj
# Get maneuvers
ego_maneuver_array = {}
for j in range(len(df_maneuvers_objects) + 1): # + 1 because of ego maneuvers
# Ego basis maneuvers for speed & acceleration control
acceleration_switch = -1
keep_switch = -1
deceleration_switch = -1
standstill_switch = -1
temp_ego_maneuver_array = np.empty(shape=[0, 7])
if j == 0: # Use ego
maneuvers = df_maneuvers
cols = columns
else: # Use objects
maneuvers = df_maneuvers_objects[j - 1]
cols = movobj_grps_coord[j - 1]
# Get start and end time of each maneuver
for i in range(len(maneuvers)):
# Start
if maneuvers['FM_EGO_accelerate'][i] == 1 and acceleration_switch == -1:
temp_lon, temp_lat = (pyproj.Transformer.from_crs(proj_in.crs, proj_out.crs, always_xy=True)).\
transform(
df[cols[1]][i],
df[cols[0]][i])
temp_ego_maneuver_array = np.append(temp_ego_maneuver_array,
[[i, i, 'FM_EGO_accelerate', temp_lon, temp_lat, 0, 0]], axis=0)
acceleration_switch = temp_ego_maneuver_array.shape[0] - 1
# End
elif maneuvers['FM_EGO_accelerate'][i] == 0 and acceleration_switch > -1:
temp_ego_maneuver_array[acceleration_switch][1] = i - 1
# Target speed
temp_ego_maneuver_array[acceleration_switch][5] = df[cols[2]][i - 1] / 3.6
# Calculate the acceleration = (target speed - start speed) / duration
temp_ego_maneuver_array[acceleration_switch][6] = abs(df[cols[2]][i - 1] / 3.6 - (
df[cols[2]][int(temp_ego_maneuver_array[acceleration_switch][0])] / 3.6)) / ((i - int(
temp_ego_maneuver_array[acceleration_switch][0])) / 10)
acceleration_switch = -1
if maneuvers['FM_EGO_keep_velocity'][i] == 1 and keep_switch == -1:
temp_lon, temp_lat = (pyproj.Transformer.from_crs(proj_in.crs, proj_out.crs, always_xy=True)).\
transform(
df[cols[1]][i],
df[cols[0]][i])
temp_ego_maneuver_array = np.append(temp_ego_maneuver_array,
[[i, i, 'FM_EGO_keep_velocity', temp_lon, temp_lat, 0, 0]],
axis=0)
keep_switch = temp_ego_maneuver_array.shape[0] - 1
elif maneuvers['FM_EGO_keep_velocity'][i] == 0 and keep_switch > -1:
temp_ego_maneuver_array[keep_switch][1] = i - 1
temp_ego_maneuver_array[keep_switch][5] = df[cols[2]][i - 1] / 3.6
temp_ego_maneuver_array[keep_switch][6] = abs(df[cols[2]][i - 1] / 3.6 - (
df[cols[2]][int(temp_ego_maneuver_array[keep_switch][0])] / 3.6)) / ((i - int(
temp_ego_maneuver_array[keep_switch][0])) / 10)
keep_switch = -1
if maneuvers['FM_EGO_decelerate'][i] == 1 and deceleration_switch == -1:
temp_lon, temp_lat = (pyproj.Transformer.from_crs(proj_in.crs, proj_out.crs, always_xy=True)).\
transform(
df[cols[1]][i],
df[cols[0]][i])
temp_ego_maneuver_array = np.append(temp_ego_maneuver_array,
[[i, i, 'FM_EGO_decelerate', temp_lon, temp_lat, 0, 0]],
axis=0)
deceleration_switch = temp_ego_maneuver_array.shape[0] - 1
elif maneuvers['FM_EGO_decelerate'][i] == 0 and deceleration_switch > -1:
temp_ego_maneuver_array[deceleration_switch][1] = i - 1
temp_ego_maneuver_array[deceleration_switch][5] = df[cols[2]][i - 1] / 3.6
temp_ego_maneuver_array[deceleration_switch][6] = abs(df[cols[2]][i - 1] / 3.6 - (
df[cols[2]][int(temp_ego_maneuver_array[deceleration_switch][0])] / 3.6)) / ((i - int(
temp_ego_maneuver_array[deceleration_switch][0])) / 10)
deceleration_switch = -1
if maneuvers['FM_EGO_standstill'][i] == 1 and standstill_switch == -1:
if len(temp_ego_maneuver_array) > 0: # Assure that last maneuver (if it exists) ends with 0 km/h
temp_ego_maneuver_array[len(temp_ego_maneuver_array) - 1][5] = 0.0
temp_lon, temp_lat = (pyproj.Transformer.from_crs(proj_in.crs, proj_out.crs, always_xy=True)).transform(df[cols[1]][i], df[cols[0]][i])
temp_ego_maneuver_array = np.append(temp_ego_maneuver_array,
[[i, i, 'FM_EGO_standstill', temp_lon, temp_lat, 0, 0]],
axis=0)
standstill_switch = temp_ego_maneuver_array.shape[0] - 1
elif maneuvers['FM_EGO_standstill'][i] == 0 and standstill_switch > -1:
temp_ego_maneuver_array[standstill_switch][1] = i - 1
temp_ego_maneuver_array[standstill_switch][5] = df[cols[2]][i - 1] / 3.6
temp_ego_maneuver_array[standstill_switch][6] = abs(df[cols[2]][i - 1] / 3.6 - (
df[cols[2]][int(temp_ego_maneuver_array[standstill_switch][0])] / 3.6)) / ((i - int(
temp_ego_maneuver_array[standstill_switch][0])) / 10)
standstill_switch = -1
if j == 0:
speed = df['speed']
else:
speed_tmp = df[movobj_grps_coord[j - 1][2]]
speed = []
for y in range(len(speed_tmp)):
if not math.isnan(speed_tmp[y]):
speed.append(speed_tmp[y])
speed = np.array(speed)
# Calculate the model speed in osc format (linear accelerations)
if temp_ego_maneuver_array.size != 0:
df_ego_maneuver_array = pd.DataFrame(
data=temp_ego_maneuver_array[0:, 0:],
index=temp_ego_maneuver_array[0:, 0],
columns=temp_ego_maneuver_array[0, 0:])
model_speed = create_speed_model(df_ego_maneuver_array, speed[0])
# Use RMSE Value for calculating the difference
if len(model_speed) - len(speed) == 1:
rmse_speed = np.sqrt(np.square(np.subtract(model_speed[0:-1], speed)).mean())
elif len(speed) - len(model_speed) == 1:
rmse_speed = np.sqrt(np.square(np.subtract(model_speed, speed[0:-1])).mean())
elif len(model_speed) == len(speed):
rmse_speed = np.sqrt(np.square(np.subtract(model_speed, speed)).mean())
else:
rmse_speed = 999
else:
rmse_speed = 999
ego_maneuver_array[j] = temp_ego_maneuver_array
quality_array[j][x] = rmse_speed
# Get the minimum RMSE values for each object (EGO + Player)
num_rows, num_cols = quality_array.shape
df_opt_acc = pd.DataFrame()
acc_thres_opt = []
obj_qual = np.empty(0)
for z in range(num_rows):
obj_qual = quality_array[z][:]
acc_thres_opt.append(acc_thres[np.argmin(obj_qual)])
if z == 0:
playername = 'EGO'
else:
playername = 'Player' + str(z)
df_opt_acc[playername] = [acc_thres[np.argmin(obj_qual)]]
if use_folder:
if not os.path.isdir(os.path.abspath(dir_name)):
raise FileNotFoundError("input must be a valid path.")
if not os.path.exists(os.path.abspath(dir_name)):
raise NotADirectoryError("input must be a directory.")
maneuver_dir = os.path.abspath(dir_name + '/maneuver_lists')
if not os.path.exists(maneuver_dir):
os.mkdir(maneuver_dir)
df_opt_acc.to_csv(maneuver_dir + '/acc_thres_values.csv')
acc_thres_opt.append(acc_thres[np.argmin(obj_qual)])
return np.array(acc_thres_opt)
def label_maneuvers(df: pd.DataFrame, df_lanes: pd.DataFrame, acc_threshold: Union[float, np.ndarray], generate_kml: bool,
opendrive_path: str, use_folder: bool, dir_name: str) -> tuple:
"""
Used for labeling the maneuvers
Args:
df: Main processed Dataframe
df_lanes: Dataframe which contains absolute positions of lanes
acc_threshold: Acceleration threshold for labeling
generate_kml: Option to generate kml files
opendrive_path: Path to opendrive file
use_folder: Option to create folder structure
dir_name: Name of the folder
Returns:
object (tuple): ego_maneuver_array, inf_maneuver_array, objlist, objects, ego, movobj_grps_coord
"""
if not isinstance(df, pd.DataFrame):
raise TypeError("input must be a pd.DataFrame")
if not isinstance(df_lanes, pd.DataFrame):
raise TypeError("input must be a pd.DataFrame")
if not (isinstance(acc_threshold, float) or isinstance(acc_threshold, np.ndarray)):
raise TypeError("input must be a float or np.ndarray")
if not isinstance(generate_kml, bool):
raise TypeError("input must be a bool")
if not isinstance(opendrive_path, str):
raise TypeError("input must be a str")
if not isinstance(use_folder, bool):
raise TypeError("input must be a bool")
if not isinstance(dir_name, str):
raise TypeError("input must be a str")
# Get signals from trajectories file
speed = df['speed']
# Labeling
lane_change_left_array, lane_change_right_array = rulebased.create_lateral_maneuver_vectors(df_lanes,
df['lat'],
df['long'])
if isinstance(acc_threshold, int) or isinstance(acc_threshold, float):
accelerate_array, \
start_array, \
keep_velocity_array, \
standstill_array, \
decelerate_array, \
stop_array, \
reversing_array = rulebased.create_longitudinal_maneuver_vectors(
speed, acceleration_definition_threshold=acc_threshold)
else:
accelerate_array, \
start_array, \
keep_velocity_array, \
standstill_array, \
decelerate_array, \
stop_array, \
reversing_array = rulebased.create_longitudinal_maneuver_vectors(
speed, acceleration_definition_threshold=acc_threshold[0])
# Create df with maneuver info
df_maneuvers = | pd.DataFrame(data=None) | pandas.DataFrame |
import re
import os
import json
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
# spreadsheet cleaning and formatting
def clean_column_name(colname):
"""convert column names to lowercase with underbars"""
colname = colname.lower().rstrip().lstrip()
colname = re.sub(r'[^a-z0-9_]+','_',colname) # sub _ for nonalpha chars
colname = re.sub(r'_$','',colname) # remove trailing _
colname = re.sub(r'^([0-9])',r'_\1',colname) # insert _ before leading digit
return colname
def clean_column_names(df, col_map={}, inplace=False):
"""clean all column names for a Pandas dataframe"""
if not inplace:
df = df.copy()
ccns = []
for c in df.columns:
if c in col_map:
ccns.append(col_map[c])
else:
ccns.append(clean_column_name(c))
df.columns = ccns
return df
def drop_columns(df, columns, inplace=False):
"""drop a list of columns from a Pandas dataframe,
in place"""
if not inplace:
df = df.copy()
for c in columns:
df.pop(c)
return df
def dropna_except(df, except_subset, inplace=False):
"""drop rows containing nans from a Pandas dataframe,
but allow nans in the specified subset of columns,
in place"""
subset = set(df.columns)
for ec in except_subset:
subset.remove(ec)
result = df.dropna(inplace=inplace, subset=subset)
# dropna(inplace=True) returns None, so hand back the mutated original frame in that case
return df if inplace else result
def cast_columns(df, dtype, columns, inplace=False, fillna=None):
"""convert columns in a dataframe to the given datatype,
in place"""
if not inplace:
df = df.copy()
for c in columns:
if fillna is not None:
df[c] = df[c].fillna(fillna)
df[c] = df[c].astype(dtype)
return df
def float_to_datetime(s, format='%Y%m%d'):
"""pandas will interpret some datetime formats as floats, e.g.,
'20180830' will be parsed as the float 20180830.0.
convert back to datetimes"""
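# e.g. 20180830.0 -> Timestamp('2018-08-30 00:00:00+0000', tz='UTC')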
def convert(value):
return pd.to_datetime(str(int(value)), format=format, utc=True)
return s.map(convert, na_action='ignore')
# return pd.to_datetime(s.astype(int).astype(str), format=format)
def doy_to_datetime(doy, year, zero_based=False):
"""convert a decimal day of year (e.g., 34.58275) to a datetime.
Day is one-based unless zero_based param is True"""
origin = '{}-01-01'.format(year)
o = pd.Timestamp(origin)
if not zero_based:
adjusted_doy = doy - 1
else:
adjusted_doy = doy
return | pd.to_datetime(adjusted_doy, unit='D', origin=o, utc=True) | pandas.to_datetime |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import os.path
import pkg_resources
import tempfile
import unittest
import numpy as np
import pandas as pd
from qiime2.metadata import (Metadata, CategoricalMetadataColumn,
NumericMetadataColumn, MetadataFileError)
def get_data_path(filename):
return pkg_resources.resource_filename('qiime2.metadata.tests',
'data/%s' % filename)
# NOTE: many of the test files in the `data` directory intentionally have
# leading/trailing whitespace characters on some lines, as well as mixed usage
# of spaces, tabs, carriage returns, and newlines. When editing these files,
# please make sure your code editor doesn't strip these leading/trailing
# whitespace characters (e.g. Atom does this by default), nor automatically
# modify the files in some other way such as converting Windows-style CRLF
# line terminators to Unix-style newlines.
#
# When committing changes to the files, carefully review the diff to make sure
# unintended changes weren't introduced.
class TestLoadErrors(unittest.TestCase):
def test_path_does_not_exist(self):
with self.assertRaisesRegex(MetadataFileError,
"Metadata file path doesn't exist"):
Metadata.load(
'/qiime2/unit/tests/hopefully/this/path/does/not/exist')
def test_path_is_directory(self):
fp = get_data_path('valid')
with self.assertRaisesRegex(MetadataFileError,
"path points to something other than a "
"file"):
Metadata.load(fp)
def test_non_utf_8_file(self):
fp = get_data_path('invalid/non-utf-8.tsv')
with self.assertRaisesRegex(MetadataFileError,
'encoded as UTF-8 or ASCII'):
Metadata.load(fp)
def test_utf_16_le_file(self):
fp = get_data_path('invalid/simple-utf-16le.txt')
with self.assertRaisesRegex(MetadataFileError,
'UTF-16 Unicode'):
Metadata.load(fp)
def test_utf_16_be_file(self):
fp = get_data_path('invalid/simple-utf-16be.txt')
with self.assertRaisesRegex(MetadataFileError,
'UTF-16 Unicode'):
Metadata.load(fp)
def test_empty_file(self):
fp = get_data_path('invalid/empty-file')
with self.assertRaisesRegex(MetadataFileError,
'locate header.*file may be empty'):
Metadata.load(fp)
def test_comments_and_empty_rows_only(self):
fp = get_data_path('invalid/comments-and-empty-rows-only.tsv')
with self.assertRaisesRegex(MetadataFileError,
'locate header.*only of comments or empty '
'rows'):
Metadata.load(fp)
def test_header_only(self):
fp = get_data_path('invalid/header-only.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_header_only_with_comments_and_empty_rows(self):
fp = get_data_path(
'invalid/header-only-with-comments-and-empty-rows.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_qiime1_empty_mapping_file(self):
fp = get_data_path('invalid/qiime1-empty.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_invalid_header(self):
fp = get_data_path('invalid/invalid-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'unrecognized ID column name.*'
'invalid_id_header'):
Metadata.load(fp)
def test_empty_id(self):
fp = get_data_path('invalid/empty-id.tsv')
with self.assertRaisesRegex(MetadataFileError, 'empty metadata ID'):
Metadata.load(fp)
def test_whitespace_only_id(self):
fp = get_data_path('invalid/whitespace-only-id.tsv')
with self.assertRaisesRegex(MetadataFileError, 'empty metadata ID'):
Metadata.load(fp)
def test_empty_column_name(self):
fp = get_data_path('invalid/empty-column-name.tsv')
with self.assertRaisesRegex(MetadataFileError,
'column without a name'):
Metadata.load(fp)
def test_whitespace_only_column_name(self):
fp = get_data_path('invalid/whitespace-only-column-name.tsv')
with self.assertRaisesRegex(MetadataFileError,
'column without a name'):
Metadata.load(fp)
def test_duplicate_ids(self):
fp = get_data_path('invalid/duplicate-ids.tsv')
with self.assertRaisesRegex(MetadataFileError,
'IDs must be unique.*id1'):
Metadata.load(fp)
def test_duplicate_ids_with_whitespace(self):
fp = get_data_path('invalid/duplicate-ids-with-whitespace.tsv')
with self.assertRaisesRegex(MetadataFileError,
'IDs must be unique.*id1'):
Metadata.load(fp)
def test_duplicate_column_names(self):
fp = get_data_path('invalid/duplicate-column-names.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Column names must be unique.*col1'):
Metadata.load(fp)
def test_duplicate_column_names_with_whitespace(self):
fp = get_data_path(
'invalid/duplicate-column-names-with-whitespace.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Column names must be unique.*col1'):
Metadata.load(fp)
def test_id_conflicts_with_id_header(self):
fp = get_data_path('invalid/id-conflicts-with-id-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
"ID 'id' conflicts.*ID column header"):
Metadata.load(fp)
def test_column_name_conflicts_with_id_header(self):
fp = get_data_path(
'invalid/column-name-conflicts-with-id-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column name 'featureid' conflicts.*ID "
"column header"):
Metadata.load(fp)
def test_column_types_unrecognized_column_name(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
'not_a_column.*column_types.*not a column '
'in the metadata file'):
Metadata.load(fp, column_types={'not_a_column': 'numeric'})
def test_column_types_unrecognized_column_type(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
'col2.*column_types.*unrecognized column '
'type.*CATEGORICAL'):
Metadata.load(fp, column_types={'col1': 'numeric',
'col2': 'CATEGORICAL'})
def test_column_types_not_convertible_to_numeric(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column 'col3' to numeric.*could not be "
"interpreted as numeric: 'bar', 'foo'"):
Metadata.load(fp, column_types={'col1': 'numeric',
'col2': 'categorical',
'col3': 'numeric'})
def test_column_types_override_directive_not_convertible_to_numeric(self):
fp = get_data_path('valid/simple-with-directive.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column 'col3' to numeric.*could not be "
"interpreted as numeric: 'bar', 'foo'"):
Metadata.load(fp, column_types={'col3': 'numeric'})
def test_directive_before_header(self):
fp = get_data_path('invalid/directive-before-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'directive.*#q2:types.*searching for '
'header'):
Metadata.load(fp)
def test_unrecognized_directive(self):
fp = get_data_path('invalid/unrecognized-directive.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Unrecognized directive.*#q2:foo.*'
'#q2:types directive is supported'):
Metadata.load(fp)
def test_duplicate_directives(self):
fp = get_data_path('invalid/duplicate-directives.tsv')
with self.assertRaisesRegex(MetadataFileError,
'duplicate directive.*#q2:types'):
Metadata.load(fp)
def test_unrecognized_column_type_in_directive(self):
fp = get_data_path('invalid/unrecognized-column-type.tsv')
with self.assertRaisesRegex(MetadataFileError,
'col2.*unrecognized column type.*foo.*'
'#q2:types directive'):
Metadata.load(fp)
def test_column_types_directive_not_convertible_to_numeric(self):
fp = get_data_path('invalid/types-directive-non-numeric.tsv')
# This error message regex is intentionally verbose because we want to
# assert that many different types of non-numeric strings aren't
# interpreted as numbers. The error message displays a sorted list of
# all values that couldn't be converted to numbers, making it possible
# to test a variety of non-numeric strings in a single test case.
msg = (r"column 'col2' to numeric.*could not be interpreted as "
r"numeric: '\$42', '\+inf', '-inf', '0xAF', '1,000', "
r"'1\.000\.0', '1_000_000', '1e3e4', 'Infinity', 'NA', 'NaN', "
"'a', 'e3', 'foo', 'inf', 'nan', 'sample-1'")
with self.assertRaisesRegex(MetadataFileError, msg):
Metadata.load(fp)
def test_directive_after_directives_section(self):
fp = get_data_path(
'invalid/directive-after-directives-section.tsv')
with self.assertRaisesRegex(MetadataFileError,
'#q2:types.*outside of the directives '
'section'):
Metadata.load(fp)
def test_directive_longer_than_header(self):
fp = get_data_path('invalid/directive-longer-than-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'row has 5 cells.*header declares 4 '
'cells'):
Metadata.load(fp)
def test_data_longer_than_header(self):
fp = get_data_path('invalid/data-longer-than-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'row has 5 cells.*header declares 4 '
'cells'):
Metadata.load(fp)
class TestLoadSuccess(unittest.TestCase):
def setUp(self):
self.temp_dir_obj = tempfile.TemporaryDirectory(
prefix='qiime2-metadata-tests-temp-')
self.temp_dir = self.temp_dir_obj.name
# This Metadata object is compared against observed Metadata objects in
# many of the tests, so just define it once here.
self.simple_md = Metadata(
pd.DataFrame({'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
# Basic sanity check to make sure the columns are ordered and typed as
# expected. It'd be unfortunate to compare observed results to expected
# results that aren't representing what we think they are!
obs_columns = [(name, props.type)
for name, props in self.simple_md.columns.items()]
exp_columns = [('col1', 'numeric'), ('col2', 'categorical'),
('col3', 'categorical')]
self.assertEqual(obs_columns, exp_columns)
def tearDown(self):
self.temp_dir_obj.cleanup()
def test_simple(self):
# Simple metadata file without comments, empty rows, jaggedness,
# missing data, odd IDs or column names, directives, etc. The file has
# multiple column types (numeric, categorical, and something that has
# mixed numbers and strings, which must be interpreted as categorical).
fp = get_data_path('valid/simple.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_bom_simple_txt(self):
# This is the encoding that notepad.exe will use most commonly
fp = get_data_path('valid/BOM-simple.txt')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_different_file_extension(self):
fp = get_data_path('valid/simple.txt')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_no_newline_at_eof(self):
fp = get_data_path('valid/no-newline-at-eof.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_unix_line_endings(self):
fp = get_data_path('valid/unix-line-endings.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_windows_line_endings(self):
fp = get_data_path('valid/windows-line-endings.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_mac_line_endings(self):
fp = get_data_path('valid/mac-line-endings.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_no_source_artifacts(self):
fp = get_data_path('valid/simple.tsv')
metadata = Metadata.load(fp)
self.assertEqual(metadata.artifacts, ())
def test_retains_column_order(self):
# Explicitly test that the file's column order is retained in the
# Metadata object. Many of the test cases use files with column names
# in alphabetical order (e.g. "col1", "col2", "col3"), which matches
# how pandas orders columns in a DataFrame when supplied with a dict
# (many of the test cases use this feature of the DataFrame
# constructor when constructing the expected DataFrame).
fp = get_data_path('valid/column-order.tsv')
obs_md = Metadata.load(fp)
# Supply DataFrame constructor with explicit column ordering instead of
# a dict.
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_columns = ['z', 'y', 'x']
exp_data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar'],
[3.0, 'c', '42']
]
exp_df = pd.DataFrame(exp_data, index=exp_index, columns=exp_columns)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_leading_trailing_whitespace(self):
fp = get_data_path('valid/leading-trailing-whitespace.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_comments(self):
fp = get_data_path('valid/comments.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_empty_rows(self):
fp = get_data_path('valid/empty-rows.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_qiime1_mapping_file(self):
fp = get_data_path('valid/qiime1.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='#SampleID')
exp_df = pd.DataFrame({'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_qiita_sample_information_file(self):
fp = get_data_path('valid/qiita-sample-information.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id.1', 'id.2'], name='sample_name')
exp_df = pd.DataFrame({
'DESCRIPTION': ['description 1', 'description 2'],
'TITLE': ['A Title', 'Another Title']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_qiita_preparation_information_file(self):
fp = get_data_path('valid/qiita-preparation-information.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id.1', 'id.2'], name='sample_name')
exp_df = pd.DataFrame({
'BARCODE': ['ACGT', 'TGCA'],
'EXPERIMENT_DESIGN_DESCRIPTION': ['longitudinal study',
'longitudinal study']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_biom_observation_metadata_file(self):
fp = get_data_path('valid/biom-observation-metadata.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['OTU_1', 'OTU_2'], name='#OTUID')
exp_df = pd.DataFrame([['k__Bacteria;p__Firmicutes', 0.890],
['k__Bacteria', 0.9999]],
columns=['taxonomy', 'confidence'],
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_supported_id_headers(self):
case_insensitive = {
'id', 'sampleid', 'sample id', 'sample-id', 'featureid',
'feature id', 'feature-id'
}
exact_match = {
'#SampleID', '#Sample ID', '#OTUID', '#OTU ID', 'sample_name'
}
# Build a set of supported headers, including exact matches and headers
# with different casing.
headers = set()
for header in case_insensitive:
headers.add(header)
headers.add(header.upper())
headers.add(header.title())
for header in exact_match:
headers.add(header)
fp = os.path.join(self.temp_dir, 'metadata.tsv')
count = 0
for header in headers:
with open(fp, 'w') as fh:
fh.write('%s\tcolumn\nid1\tfoo\nid2\tbar\n' % header)
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2'], name=header)
exp_df = pd.DataFrame({'column': ['foo', 'bar']}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
count += 1
        # Since this test case is a little complicated, make sure that the
        # expected number of comparisons happened: 7 case-insensitive headers
        # x 3 casings + 5 exact-match headers = 26.
self.assertEqual(count, 26)
def test_recommended_ids(self):
fp = get_data_path('valid/recommended-ids.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['c6ca034a-223f-40b4-a0e0-45942912a5ea', 'My.ID'],
name='id')
exp_df = pd.DataFrame({'col1': ['foo', 'bar']}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_non_standard_characters(self):
# Test that non-standard characters in IDs, column names, and cells are
# handled correctly. The test case isn't exhaustive (e.g. it doesn't
# test every Unicode character; that would be a nice additional test
# case to have in the future). Instead, this test aims to be more of an
# integration test for the robustness of the reader to non-standard
# data. Many of the characters and their placement within the data file
# are based on use-cases/bugs reported on the forum, Slack, etc. The
# data file has comments explaining these test case choices in more
# detail.
fp = get_data_path('valid/non-standard-characters.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['©id##1', '((id))2', "'id_3<>'", '"id#4"',
'i d\r\t\n5'], name='id')
exp_columns = ['↩c@l1™', 'col(#2)', "#col'3", '"<col_4>"',
'col\t \r\n5']
exp_data = [
['ƒoo', '(foo)', '#f o #o', 'fo\ro', np.nan],
["''2''", 'b#r', 'ba\nr', np.nan, np.nan],
['b"ar', 'c\td', '4\r\n2', np.nan, np.nan],
['b__a_z', '<42>', '>42', np.nan, np.nan],
['baz', np.nan, '42']
]
exp_df = pd.DataFrame(exp_data, index=exp_index, columns=exp_columns)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_missing_data(self):
fp = get_data_path('valid/missing-data.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['None', 'nan', 'NA'], name='id')
exp_df = pd.DataFrame(collections.OrderedDict([
('col1', [1.0, np.nan, np.nan]),
('NA', [np.nan, np.nan, np.nan]),
('col3', ['null', 'N/A', 'NA']),
('col4', np.array([np.nan, np.nan, np.nan], dtype=object))]),
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
# Test that column types are correct (mainly for the two empty columns;
# one should be numeric, the other categorical).
obs_columns = [(name, props.type)
for name, props in obs_md.columns.items()]
exp_columns = [('col1', 'numeric'), ('NA', 'numeric'),
('col3', 'categorical'), ('col4', 'categorical')]
self.assertEqual(obs_columns, exp_columns)
def test_minimal_file(self):
# Simplest possible metadata file consists of one ID and zero columns.
fp = get_data_path('valid/minimal.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['a'], name='id')
exp_df = pd.DataFrame({}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_single_id(self):
fp = get_data_path('valid/single-id.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1'], name='id')
exp_df = pd.DataFrame({'col1': [1.0], 'col2': ['a'], 'col3': ['foo']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_no_columns(self):
fp = get_data_path('valid/no-columns.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['a', 'b', 'my-id'], name='id')
exp_df = pd.DataFrame({}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_single_column(self):
fp = get_data_path('valid/single-column.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': [1.0, 2.0, 3.0]}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_trailing_columns(self):
fp = get_data_path('valid/trailing-columns.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_jagged_trailing_columns(self):
# Test case based on https://github.com/qiime2/qiime2/issues/335
fp = get_data_path('valid/jagged-trailing-columns.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_padding_rows_shorter_than_header(self):
fp = get_data_path('valid/rows-shorter-than-header.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': [1.0, 2.0, np.nan],
'col2': ['a', np.nan, np.nan],
'col3': [np.nan, np.nan, np.nan]},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_all_cells_padded(self):
fp = get_data_path('valid/all-cells-padded.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': [np.nan, np.nan, np.nan],
'col2': [np.nan, np.nan, np.nan],
'col3': [np.nan, np.nan, np.nan]},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_does_not_cast_ids_or_column_names(self):
fp = get_data_path('valid/no-id-or-column-name-type-cast.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['0.000001', '0.004000', '0.000000'],
dtype=object, name='id')
exp_columns = ['42.0', '1000', '-4.2']
exp_data = [
[2.0, 'b', 2.5],
[1.0, 'b', 4.2],
[3.0, 'c', -9.999]
]
exp_df = pd.DataFrame(exp_data, index=exp_index, columns=exp_columns)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_numeric_column(self):
fp = get_data_path('valid/numeric-column.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3', 'id4', 'id5', 'id6', 'id7',
'id8', 'id9', 'id10', 'id11', 'id12'], name='id')
exp_df = pd.DataFrame({'col1': [0.0, 2.0, 0.0003, -4.2, 1e-4, 1e4,
1.5e2, np.nan, 1.0, 0.5, 1e-8, -0.0]},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_numeric_column_as_categorical(self):
fp = get_data_path('valid/numeric-column.tsv')
obs_md = Metadata.load(fp, column_types={'col1': 'categorical'})
exp_index = pd.Index(['id1', 'id2', 'id3', 'id4', 'id5', 'id6', 'id7',
'id8', 'id9', 'id10', 'id11', 'id12'], name='id')
exp_df = pd.DataFrame({'col1': ['0', '2.0', '0.00030', '-4.2', '1e-4',
'1e4', '+1.5E+2', np.nan, '1.', '.5',
'1e-08', '-0']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_with_complete_types_directive(self):
fp = get_data_path('valid/complete-types-directive.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_with_partial_types_directive(self):
fp = get_data_path('valid/partial-types-directive.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_with_empty_types_directive(self):
fp = get_data_path('valid/empty-types-directive.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_with_case_insensitive_types_directive(self):
fp = get_data_path('valid/case-insensitive-types-directive.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': [-5.0, 0.0, 42.0]},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_column_types_without_directive(self):
fp = get_data_path('valid/simple.tsv')
obs_md = Metadata.load(fp, column_types={'col1': 'categorical'})
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_column_types_override_directive(self):
fp = get_data_path('valid/simple-with-directive.tsv')
obs_md = Metadata.load(fp, column_types={'col1': 'categorical',
'col2': 'categorical'})
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
class TestSave(unittest.TestCase):
def setUp(self):
self.temp_dir_obj = tempfile.TemporaryDirectory(
prefix='qiime2-metadata-tests-temp-')
self.temp_dir = self.temp_dir_obj.name
self.filepath = os.path.join(self.temp_dir, 'metadata.tsv')
def tearDown(self):
self.temp_dir_obj.cleanup()
def test_simple(self):
md = Metadata(pd.DataFrame(
{'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md.save(self.filepath)
with open(self.filepath, 'r') as fh:
obs = fh.read()
exp = (
"id\tcol1\tcol2\tcol3\n"
"#q2:types\tnumeric\tcategorical\tcategorical\n"
"id1\t1\ta\tfoo\n"
"id2\t2\tb\tbar\n"
"id3\t3\tc\t42\n"
)
self.assertEqual(obs, exp)
def test_save_metadata_auto_extension(self):
md = Metadata(pd.DataFrame(
{'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index= | pd.Index(['id1', 'id2', 'id3'], name='id') | pandas.Index |
"""
This is a program which automatically fetches, organizes, and graphs stock data for a user's desired ticker.
It allows the user to see the High, Low, Open, Close, and Volume of a ticker for the past 2 weeks.
It also provides metrics such as the Average Volume, Volatility, and Stochastic Oscillator.
On top of that, it provides graphs that show the Volume, Moving Average, Expected Returns, and correlation with the S&P 500.
It is designed to assist in the technical analysis of a stock and to graph its fundamental metrics.
"""
import math
import datetime
import pandas as pd
from pandas import Series, DataFrame
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import style
import pandas_datareader.data as pdr
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__license__ = "MIT"
__version__ = "1.0.0"
__status__ = "Development"
mpl.rc("figure", figsize = (7, 8))
style.use("seaborn")
ticker = input("Please enter the ticker of the desired stock: ").upper()
month = int(input("How many months back should we fetch the historic data? "))
cur_date = datetime.datetime.today()  # gets the current date
orig_date = pd.to_datetime(cur_date, format="%Y-%m-%d") - pd.DateOffset(months = month)  # gets the date the specified number of months back
start = datetime.datetime(orig_date.year, orig_date.month, orig_date.day)
end = datetime.datetime(cur_date.year, cur_date.month, cur_date.day)
orig_date_2 = | pd.to_datetime(cur_date, format="%Y-%m-%d") | pandas.to_datetime |
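# ---------------------------------------------------------------------------
# Hedged illustrative sketch (not part of the original program, which is
# truncated above): one plausible way to compute the docstring's metrics
# (moving average, volatility, stochastic oscillator) with pandas. The column
# names 'High', 'Low', 'Close' are assumptions about the fetched data frame.
import math
import pandas as pd

def _metrics_sketch(df: pd.DataFrame) -> pd.DataFrame:
    out = df.copy()
    # 20-day simple moving average of the closing price
    out["MA20"] = out["Close"].rolling(window=20).mean()
    # annualized volatility from the rolling standard deviation of daily returns
    daily_returns = out["Close"].pct_change()
    out["Volatility"] = daily_returns.rolling(window=20).std() * math.sqrt(252)
    # 14-day stochastic oscillator (%K)
    low14 = out["Low"].rolling(window=14).min()
    high14 = out["High"].rolling(window=14).max()
    out["%K"] = 100 * (out["Close"] - low14) / (high14 - low14)
    return out
# ---------------------------------------------------------------------------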
"""
MIT License
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import time
from time import strptime, mktime
from pandas import Timestamp, DateOffset, to_datetime, Series, NaT, isnull
import calendar
from calendar import timegm
from datetime import datetime, timedelta
import pytz
from pytz import timezone
from stocklook.config import config
import logging as lg
log = lg.getLogger(__name__)
# Time-related helper methods
TZ = 'PYTZ_TIMEZONE'
GLOBAL_TIMEOUT_MAP = dict()
def timestamp_to_local(dt):
"""
Convert nearly any time object to local time.
:param dt:
        The following objects are supported:
        - utc integer/float/numeric string
        - datetime.datetime
        - pandas.Timestamp
        - date or datetime string coercible by the pandas.Timestamp constructor
    :return: a timezone-aware datetime localized to config['PYTZ_TIMEZONE']
"""
try:
return localize_utc_int(dt)
except:
if not dt:
return None
if isinstance(dt, str):
# convert a string-ish object to a
# pandas.Timestamp (way smarter than datetime)
            dt = Timestamp(dt)  # reassign dt so the parsed value flows through the conversion below
# Get rid of existing timezones
dt = de_localize_datetime(dt)
if hasattr(dt, 'utctimetuple'):
dt = timegm(dt.utctimetuple())
return localize_utc_int(dt)
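# Hedged usage sketch (not part of the original module). Assuming
# config['PYTZ_TIMEZONE'] has been set (e.g. to 'US/Pacific'), each of the
# calls below returns a timezone-aware datetime in that zone:
#
#     timestamp_to_local(1483228800)                # epoch seconds
#     timestamp_to_local("2017-01-01 00:00:00")     # date/time string
#     timestamp_to_local(datetime(2017, 1, 1))      # naive datetime.datetime
#     timestamp_to_local(Timestamp("2017-01-01"))   # pandas Timestamp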
def localize_utc_int(utc_int):
tz = timezone(config[TZ])
utc_dt = datetime.fromtimestamp(int(float(utc_int)), pytz.utc)
return utc_dt.astimezone(tz)
def de_localize_datetime(dt):
tz_info = getattr(dt, 'tzinfo', None)
if tz_info and tz_info != pytz.utc:
if hasattr(dt, 'astimezone'):
dt = dt.astimezone(pytz.utc)
return dt.replace(tzinfo=None)
return dt
def timestamp_trim_to_min(time_stamp):
t = Timestamp(time_stamp)
s = DateOffset(seconds=t.second)
return t - s
def timestamp_trim_to_hour(time_stamp):
t = Timestamp(time_stamp)
s = DateOffset(seconds=t.second, minutes=t.minute)
return t - s
def timestamp_trim_to_date(time_stamp):
t = Timestamp(time_stamp)
    return t.date()  # call date() so a datetime.date is returned, not the bound method
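# Illustrative behavior of the trim helpers (hedged example values):
#
#     ts = "2017-06-01 13:45:30"
#     timestamp_trim_to_min(ts)   # -> Timestamp('2017-06-01 13:45:00')
#     timestamp_trim_to_hour(ts)  # -> Timestamp('2017-06-01 13:00:00')
#     timestamp_trim_to_date(ts)  # -> datetime.date(2017, 6, 1)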
def timestamp_from_utc(utc_time):
try:
utc = datetime.utcfromtimestamp(utc_time)
utc_form = utc.strftime('%Y-%m-%d %H:%M:%S')
except (TypeError, OSError):
utc_form = utc_time
return | Timestamp(utc_form) | pandas.Timestamp |
from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = f"DataFrame index must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@pytest.mark.parametrize("orient", ["split", "values"])
@pytest.mark.parametrize(
"data",
[
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
],
)
def test_frame_non_unique_columns(self, orient, data):
df = DataFrame(data, index=[1, 2], columns=["x", "x"])
result = read_json(
df.to_json(orient=orient), orient=orient, convert_dates=["x"]
)
if orient == "values":
expected = pd.DataFrame(data)
if expected.iloc[:, 0].dtype == "datetime64[ns]":
# orient == "values" by default will write Timestamp objects out
                # in milliseconds; these are internally stored in nanoseconds,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
elif orient == "split":
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("orient", ["index", "columns", "records"])
def test_frame_non_unique_columns_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
msg = f"DataFrame columns must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
def test_frame_default_orient(self, float_frame):
assert float_frame.to_json() == float_frame.to_json(orient="columns")
@pytest.mark.parametrize("dtype", [False, float])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame):
data = float_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = float_frame
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
data = self.intframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.intframe.copy()
if (
numpy
and (is_platform_32bit() or is_platform_windows())
and not dtype
and orient != "split"
):
# TODO: see what is causing roundtrip dtype loss
expected = expected.astype(np.int32)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
df = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
dtype=dtype,
)
# TODO: do we even need to support U3 dtypes?
if numpy and dtype == "U3" and orient != "split":
pytest.xfail("Can't decode directly to array")
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = df.copy()
if not dtype:
expected = expected.astype(np.int64)
        # index, columns, and records orients cannot fully preserve the string
# dtype for axes as the index and column labels are used as keys in
# JSON objects. JSON keys are by definition strings, so there's no way
# to disambiguate whether those keys actually were strings or numeric
# beforehand and numeric wins out.
# TODO: Split should be able to support this
if convert_axes and (orient in ("split", "index", "columns")):
expected.columns = expected.columns.astype(np.int64)
expected.index = expected.index.astype(np.int64)
elif orient == "records" and convert_axes:
expected.columns = expected.columns.astype(np.int64)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_categorical(self, orient, convert_axes, numpy):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
data = self.categorical.to_json(orient=orient)
if numpy and orient in ("records", "values"):
pytest.xfail(f"Orient {orient} is broken with numpy=True")
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.categorical.copy()
expected.index = expected.index.astype(str) # Categorical not preserved
expected.index.name = None # index names aren't preserved in JSON
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_empty(self, orient, convert_axes, numpy, empty_frame):
data = empty_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = empty_frame.copy()
# TODO: both conditions below are probably bugs
if convert_axes:
expected.index = expected.index.astype(float)
expected.columns = expected.columns.astype(float)
if numpy and orient == "values":
expected = expected.reindex([0], axis=1).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
# TODO: improve coverage with date_format parameter
data = self.tsframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.tsframe.copy()
if not convert_axes: # one off for ts handling
# DTI gets converted to epoch values
idx = expected.index.astype(np.int64) // 1000000
if orient != "split": # TODO: handle consistently across orients
idx = idx.astype(str)
expected.index = idx
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_mixed(self, orient, convert_axes, numpy):
if numpy and orient != "split":
pytest.xfail("Can't decode directly to array")
index = pd.Index(["a", "b", "c", "d", "e"])
values = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
df = DataFrame(data=values, index=index)
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = df.copy()
expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize(
"data,msg,orient",
[
('{"key":b:a:d}', "Expected object or value", "columns"),
# too few indices
(
'{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)",
"split",
),
# too many columns
(
'{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
"3 columns passed, passed data had 2 columns",
"split",
),
# bad key
(
'{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"unexpected key\(s\): badkey",
"split",
),
],
)
def test_frame_from_json_bad_data_raises(self, data, msg, orient):
with pytest.raises(ValueError, match=msg):
read_json(StringIO(data), orient=orient)
@pytest.mark.parametrize("dtype", [True, False])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_frame_from_json_missing_data(self, orient, convert_axes, numpy, dtype):
num_df = DataFrame([[1, 2], [4, 5, 6]])
result = read_json(
num_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
assert np.isnan(result.iloc[0, 2])
obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
result = read_json(
obj_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
if not dtype: # TODO: Special case for object data; maybe a bug?
assert result.iloc[0, 2] is None
else:
assert np.isnan(result.iloc[0, 2])
@pytest.mark.parametrize("inf", [np.inf, np.NINF])
@pytest.mark.parametrize("dtype", [True, False])
def test_frame_infinity(self, orient, inf, dtype):
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = inf
result = read_json(df.to_json(), dtype=dtype)
assert np.isnan(result.iloc[0, 2])
@pytest.mark.skipif(
is_platform_32bit(), reason="not compliant on 32-bit, xref #15865"
)
@pytest.mark.parametrize(
"value,precision,expected_val",
[
(0.95, 1, 1.0),
(1.95, 1, 2.0),
(-1.95, 1, -2.0),
(0.995, 2, 1.0),
(0.9995, 3, 1.0),
(0.99999999999999944, 15, 1.0),
],
)
def test_frame_to_json_float_precision(self, value, precision, expected_val):
df = pd.DataFrame([dict(a_float=value)])
encoded = df.to_json(double_precision=precision)
assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=["jim", "joe"])
assert not df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
# GH 7445
result = pd.DataFrame({"test": []}, index=[]).to_json(orient="columns")
expected = '{"test":{}}'
assert result == expected
def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=["jim", "joe"])
df["joe"] = df["joe"].astype("i8")
assert df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
def test_frame_mixedtype_orient(self): # GH10289
vals = [
[10, 1, "foo", 0.1, 0.01],
[20, 2, "bar", 0.2, 0.02],
[30, 3, "baz", 0.3, 0.03],
[40, 4, "qux", 0.4, 0.04],
]
df = DataFrame(
vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"]
)
assert df._is_mixed_type
right = df.copy()
for orient in ["split", "index", "columns"]:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
tm.assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient="records")
left = read_json(inp, orient="records", convert_axes=False)
tm.assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient="values")
left = read_json(inp, orient="values", convert_axes=False)
tm.assert_frame_equal(left, right)
def test_v12_compat(self, datapath):
df = DataFrame(
[
[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478],
],
columns=["A", "B", "C", "D"],
index=pd.date_range("2000-01-03", "2000-01-07"),
)
df["date"] = pd.Timestamp("19920106 18:21:32.12")
df.iloc[3, df.columns.get_loc("date")] = pd.Timestamp("20130101")
df["modified"] = df["date"]
df.iloc[1, df.columns.get_loc("modified")] = pd.NaT
dirpath = datapath("io", "json", "data")
v12_json = os.path.join(dirpath, "tsframe_v012.json")
df_unser = pd.read_json(v12_json)
tm.assert_frame_equal(df, df_unser)
df_iso = df.drop(["modified"], axis=1)
v12_iso_json = os.path.join(dirpath, "tsframe_iso_v012.json")
df_unser_iso = pd.read_json(v12_iso_json)
tm.assert_frame_equal(df_iso, df_unser_iso)
def test_blocks_compat_GH9037(self):
index = pd.date_range("20000101", periods=10, freq="H")
df_mixed = DataFrame(
OrderedDict(
float_1=[
-0.92077639,
0.77434435,
1.25234727,
0.61485564,
-0.60316077,
0.24653374,
0.28668979,
-2.51969012,
0.95748401,
-1.02970536,
],
int_1=[
19680418,
75337055,
99973684,
65103179,
79373900,
40314334,
21290235,
4991321,
41903419,
16008365,
],
str_1=[
"78c608f1",
"64a99743",
"13d2ff52",
"ca7f4af2",
"97236474",
"bde7e214",
"1a6bde47",
"b1190be5",
"7a669144",
"8d64d068",
],
float_2=[
-0.0428278,
-1.80872357,
3.36042349,
-0.7573685,
-0.48217572,
0.86229683,
1.08935819,
0.93898739,
-0.03030452,
1.43366348,
],
str_2=[
"14f04af9",
"d085da90",
"4bcfac83",
"81504caf",
"2ffef4a9",
"08e2f5c4",
"07e1af03",
"addbd4a7",
"1f6a09ba",
"4bfc4d87",
],
int_2=[
86967717,
98098830,
51927505,
20372254,
12601730,
20884027,
34193846,
10561746,
24867120,
76131025,
],
),
index=index,
)
# JSON deserialisation always creates unicode strings
df_mixed.columns = df_mixed.columns.astype("unicode")
df_roundtrip = pd.read_json(df_mixed.to_json(orient="split"), orient="split")
tm.assert_frame_equal(
df_mixed,
df_roundtrip,
check_index_type=True,
check_column_type=True,
by_blocks=True,
check_exact=True,
)
def test_frame_nonprintable_bytes(self):
# GH14256: failing column caused segfaults, if it is not the last one
class BinaryThing:
def __init__(self, hexed):
self.hexed = hexed
self.binary = bytes.fromhex(hexed)
def __str__(self) -> str:
return self.hexed
hexed = "574b4454ba8c5eb4f98a8f45"
binthing = BinaryThing(hexed)
# verify the proper conversion of printable content
df_printable = DataFrame({"A": [binthing.hexed]})
assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}'
# check if non-printable content throws appropriate Exception
df_nonprintable = DataFrame({"A": [binthing]})
msg = "Unsupported UTF-8 sequence length when encoding string"
with pytest.raises(OverflowError, match=msg):
df_nonprintable.to_json()
# the same with multiple columns threw segfaults
df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"])
with pytest.raises(OverflowError):
df_mixed.to_json()
# default_handler should resolve exceptions for non-string types
result = df_nonprintable.to_json(default_handler=str)
expected = f'{{"A":{{"0":"{hexed}"}}}}'
assert result == expected
assert (
df_mixed.to_json(default_handler=str)
== f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}'
)
def test_label_overflow(self):
# GH14256: buffer length not checked when writing label
result = pd.DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json()
expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}'
assert result == expected
def test_series_non_unique_index(self):
s = Series(["a", "b"], index=[1, 1])
msg = "Series index must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="index")
tm.assert_series_equal(
s, read_json(s.to_json(orient="split"), orient="split", typ="series")
)
unser = read_json(s.to_json(orient="records"), orient="records", typ="series")
tm.assert_numpy_array_equal(s.values, unser.values)
def test_series_default_orient(self, string_series):
assert string_series.to_json() == string_series.to_json(orient="index")
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_simple(self, orient, numpy, string_series):
data = string_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = string_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [False, None])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_object(self, orient, numpy, dtype, object_series):
data = object_series.to_json(orient=orient)
result = pd.read_json(
data, typ="series", orient=orient, numpy=numpy, dtype=dtype
)
expected = object_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_empty(self, orient, numpy, empty_series):
data = empty_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = empty_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
else:
expected.index = expected.index.astype(float)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_timeseries(self, orient, numpy, datetime_series):
data = datetime_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = datetime_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [np.float64, np.int])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_numeric(self, orient, numpy, dtype):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
data = s.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = s.copy()
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
tm.assert_series_equal(result, expected)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="garbage")
def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", precise_float=True)
tm.assert_series_equal(result, s, check_index_type=False)
def test_series_with_dtype(self):
# GH 21986
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", dtype=np.int64)
expected = Series([4] * 3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dtype,expected",
[
(True, Series(["2000-01-01"], dtype="datetime64[ns]")),
(False, Series([946684800000])),
],
)
def test_series_with_dtype_datetime(self, dtype, expected):
s = Series(["2000-01-01"], dtype="datetime64[ns]")
data = s.to_json()
result = pd.read_json(data, typ="series", dtype=dtype)
tm.assert_series_equal(result, expected)
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
tm.assert_frame_equal(
result, df, check_index_type=False, check_column_type=False
)
def test_typ(self):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64")
result = read_json(s.to_json(), typ=None)
tm.assert_series_equal(result, s)
def test_reconstruction_index(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
def test_path(self, float_frame):
with tm.ensure_clean("test.json") as path:
for df in [
float_frame,
self.intframe,
self.tsframe,
self.mixed_frame,
]:
df.to_json(path)
read_json(path)
def test_axis_dates(self, datetime_series):
# frame
json = self.tsframe.to_json()
result = read_json(json)
tm.assert_frame_equal(result, self.tsframe)
# series
json = datetime_series.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, datetime_series, check_names=False)
assert result.name is None
def test_convert_dates(self, datetime_series):
# frame
df = self.tsframe.copy()
df["date"] = Timestamp("20130101")
json = df.to_json()
result = read_json(json)
tm.assert_frame_equal(result, df)
df["foo"] = 1.0
json = df.to_json(date_unit="ns")
result = read_json(json, convert_dates=False)
expected = df.copy()
expected["date"] = expected["date"].values.view("i8")
expected["foo"] = expected["foo"].astype("int64")
tm.assert_frame_equal(result, expected)
# series
ts = Series(Timestamp("20130101"), index=datetime_series.index)
json = ts.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, ts)
@pytest.mark.parametrize("date_format", ["epoch", "iso"])
@pytest.mark.parametrize("as_object", [True, False])
@pytest.mark.parametrize(
"date_typ", [datetime.date, datetime.datetime, pd.Timestamp]
)
def test_date_index_and_values(self, date_format, as_object, date_typ):
data = [date_typ(year=2020, month=1, day=1), pd.NaT]
if as_object:
data.append("a")
ser = pd.Series(data, index=data)
result = ser.to_json(date_format=date_format)
if date_format == "epoch":
expected = '{"1577836800000":1577836800000,"null":null}'
else:
expected = (
'{"2020-01-01T00:00:00.000Z":"2020-01-01T00:00:00.000Z","null":null}'
)
if as_object:
expected = expected.replace("}", ',"a":"a"}')
assert result == expected
@pytest.mark.parametrize(
"infer_word",
[
"trade_time",
"date",
"datetime",
"sold_at",
"modified",
"timestamp",
"timestamps",
],
)
def test_convert_dates_infer(self, infer_word):
# GH10747
from pandas.io.json import dumps
data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]
expected = DataFrame(
[[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
)
result = read_json(dumps(data))[["id", infer_word]]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_frame(self, date, date_unit):
df = self.tsframe.copy()
df["date"] = Timestamp(date)
df.iloc[1, df.columns.get_loc("date")] = pd.NaT
df.iloc[5, df.columns.get_loc("date")] = pd.NaT
if date_unit:
json = df.to_json(date_format="iso", date_unit=date_unit)
else:
json = df.to_json(date_format="iso")
result = read_json(json)
expected = df.copy()
expected.index = expected.index.tz_localize("UTC")
expected["date"] = expected["date"].dt.tz_localize("UTC")
tm.assert_frame_equal(result, expected)
def test_date_format_frame_raises(self):
df = self.tsframe.copy()
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
df.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_series(self, date, date_unit, datetime_series):
ts = Series(Timestamp(date), index=datetime_series.index)
ts.iloc[1] = pd.NaT
ts.iloc[5] = pd.NaT
if date_unit:
json = ts.to_json(date_format="iso", date_unit=date_unit)
else:
json = ts.to_json(date_format="iso")
result = read_json(json, typ="series")
expected = ts.copy()
expected.index = expected.index.tz_localize("UTC")
expected = expected.dt.tz_localize("UTC")
tm.assert_series_equal(result, expected)
def test_date_format_series_raises(self, datetime_series):
ts = Series(Timestamp("20130101 20:43:42.123"), index=datetime_series.index)
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
ts.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
def test_date_unit(self, unit):
df = self.tsframe.copy()
df["date"] = Timestamp("20130101 20:43:42")
dl = df.columns.get_loc("date")
df.iloc[1, dl] = Timestamp("19710101 20:43:42")
df.iloc[2, dl] = Timestamp("21460101 20:43:42")
df.iloc[4, dl] = pd.NaT
json = df.to_json(date_format="epoch", date_unit=unit)
# force date unit
result = read_json(json, date_unit=unit)
tm.assert_frame_equal(result, df)
# detect date unit
result = read_json(json, date_unit=None)
tm.assert_frame_equal(result, df)
def test_weird_nested_json(self):
# this used to core dump the parser
s = r"""{
"status": "success",
"data": {
"posts": [
{
"id": 1,
"title": "A blog post",
"body": "Some useful content"
},
{
"id": 2,
"title": "Another blog post",
"body": "More content"
}
]
}
}"""
read_json(s)
def test_doc_example(self):
dfj2 = DataFrame(np.random.randn(5, 2), columns=list("AB"))
dfj2["date"] = Timestamp("20130101")
dfj2["ints"] = range(5)
dfj2["bools"] = True
dfj2.index = pd.date_range("20130101", periods=5)
json = dfj2.to_json()
result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_})
tm.assert_frame_equal(result, result)
def test_misc_example(self):
# parsing unordered input fails
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
error_msg = """DataFrame\\.index are different
DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]: Index\\(\\['a', 'b'\\], dtype='object'\\)
\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
with pytest.raises(AssertionError, match=error_msg):
tm.assert_frame_equal(result, expected, check_index_type=False)
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
@tm.network
@pytest.mark.single
def test_round_trip_exception_(self):
# GH 3867
csv = "https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv"
df = pd.read_csv(csv)
s = df.to_json()
result = pd.read_json(s)
tm.assert_frame_equal(result.reindex(index=df.index, columns=df.columns), df)
@tm.network
@pytest.mark.single
@pytest.mark.parametrize(
"field,dtype",
[
["created_at", pd.DatetimeTZDtype(tz="UTC")],
["closed_at", "datetime64[ns]"],
["updated_at", pd.DatetimeTZDtype(tz="UTC")],
],
)
def test_url(self, field, dtype):
url = "https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5" # noqa
result = read_json(url, convert_dates=True)
assert result[field].dtype == dtype
def test_timedelta(self):
converter = lambda x: | pd.to_timedelta(x, unit="ms") | pandas.to_timedelta |
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
to_datetime,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
class TestRollingTS:
# rolling time-series friendly
# xref GH13327
def setup_method(self, method):
self.regular = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
).set_index("A")
self.ragged = DataFrame({"B": range(5)})
self.ragged.index = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
def test_doc_string(self):
df = DataFrame(
{"B": [0, 1, 2, np.nan, 4]},
index=[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
],
)
df
df.rolling("2s").sum()
def test_invalid_window_non_int(self):
# not a valid freq
msg = "passed window foobar is not compatible with a datetimelike index"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="foobar")
# not a datetimelike index
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
self.regular.reset_index().rolling(window="foobar")
@pytest.mark.parametrize("freq", ["2MS", offsets.MonthBegin(2)])
def test_invalid_window_nonfixed(self, freq):
# non-fixed freqs
msg = "\\<2 \\* MonthBegins\\> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("freq", ["1D", offsets.Day(2), "2ms"])
def test_valid_window(self, freq):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("minp", [1.0, "foo", np.array([1, 2, 3])])
def test_invalid_minp(self, minp):
# non-integer min_periods
msg = (
r"local variable 'minp' referenced before assignment|"
"min_periods must be an integer"
)
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="1D", min_periods=minp)
def test_invalid_center_datetimelike(self):
# center is not implemented
msg = "center is not implemented for datetimelike and offset based windows"
with pytest.raises(NotImplementedError, match=msg):
self.regular.rolling(window="1D", center=True)
def test_on(self):
df = self.regular
# not a valid column
msg = (
r"invalid on specified as foobar, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling(window="2s", on="foobar")
# column is valid
df = df.copy()
df["C"] = date_range("20130101", periods=len(df))
df.rolling(window="2d", on="C").sum()
# invalid columns
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
df.rolling(window="2d", on="B")
        # ok even though the 'on' column is not among the selected columns
df.rolling(window="2d", on="C").B.sum()
def test_monotonic_on(self):
# on/index must be monotonic
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
assert df.A.is_monotonic
df.rolling("2s", on="A").sum()
df = df.set_index("A")
assert df.index.is_monotonic
df.rolling("2s").sum()
def test_non_monotonic_on(self):
# GH 19248
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
df = df.set_index("A")
non_monotonic_index = df.index.to_list()
non_monotonic_index[0] = non_monotonic_index[3]
df.index = non_monotonic_index
assert not df.index.is_monotonic
msg = "index must be monotonic"
with pytest.raises(ValueError, match=msg):
df.rolling("2s").sum()
df = df.reset_index()
msg = (
r"invalid on specified as A, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling("2s", on="A").sum()
def test_frame_on(self):
df = DataFrame(
{"B": range(5), "C": date_range("20130101 09:00:00", periods=5, freq="3s")}
)
df["A"] = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
        # we are simulating using 'on'
expected = df.set_index("A").rolling("2s").B.sum().reset_index(drop=True)
result = df.rolling("2s", on="A").B.sum()
tm.assert_series_equal(result, expected)
# test as a frame
# we should be ignoring the 'on' as an aggregation column
        # note that the expected frame is set, computed, and then reset,
        # so its columns need to be reordered compared to the actual
        # result, where they keep the original ordering
expected = (
df.set_index("A").rolling("2s")[["B"]].sum().reset_index()[["B", "A"]]
)
result = df.rolling("2s", on="A")[["B"]].sum()
tm.assert_frame_equal(result, expected)
def test_frame_on2(self):
# using multiple aggregation columns
df = DataFrame(
{
"A": [0, 1, 2, 3, 4],
"B": [0, 1, 2, np.nan, 4],
"C": Index(
[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
),
},
columns=["A", "C", "B"],
)
expected1 = DataFrame(
{"A": [0.0, 1, 3, 3, 7], "B": [0, 1, 3, np.nan, 4], "C": df["C"]},
columns=["A", "C", "B"],
)
result = df.rolling("2s", on="C").sum()
expected = expected1
tm.assert_frame_equal(result, expected)
expected = Series([0, 1, 3, np.nan, 4], name="B")
result = df.rolling("2s", on="C").B.sum()
tm.assert_series_equal(result, expected)
expected = expected1[["A", "B", "C"]]
result = df.rolling("2s", on="C")[["A", "B", "C"]].sum()
tm.assert_frame_equal(result, expected)
def test_basic_regular(self):
df = self.regular.copy()
df.index = date_range("20130101", periods=5, freq="D")
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="1D").sum()
tm.assert_frame_equal(result, expected)
df.index = date_range("20130101", periods=5, freq="2D")
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="2D", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="2D", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1).sum()
result = df.rolling(window="2D").sum()
tm.assert_frame_equal(result, expected)
def test_min_periods(self):
# compare for min_periods
df = self.regular
        # these are slightly different
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling("2s").sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling("2s", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
def test_closed(self):
# xref GH13965
df = DataFrame(
{"A": [1] * 5},
index=[
Timestamp("20130101 09:00:01"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:04"),
Timestamp("20130101 09:00:06"),
],
)
# closed must be 'right', 'left', 'both', 'neither'
msg = "closed must be 'right', 'left', 'both' or 'neither'"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="2s", closed="blabla")
expected = df.copy()
expected["A"] = [1.0, 2, 2, 2, 1]
result = df.rolling("2s", closed="right").sum()
tm.assert_frame_equal(result, expected)
# default should be 'right'
result = df.rolling("2s").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [1.0, 2, 3, 3, 2]
result = df.rolling("2s", closed="both").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 2, 2, 1]
result = df.rolling("2s", closed="left").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 1, 1, np.nan]
result = df.rolling("2s", closed="neither").sum()
tm.assert_frame_equal(result, expected)
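        # Summary of the window interval each `closed` value covers for a
        # window ending at time t with span w (here w = 2s):
        #   'right'   -> (t - w, t]   (the default for time-based windows)
        #   'both'    -> [t - w, t]
        #   'left'    -> [t - w, t)
        #   'neither' -> (t - w, t)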
def test_ragged_sum(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 3, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=2).sum()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 3, np.nan, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s").sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="4s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="4s", min_periods=3).sum()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 6, 10]
tm.assert_frame_equal(result, expected)
def test_ragged_mean(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).mean()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).mean()
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_median(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).median()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).median()
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_quantile(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).quantile(0.5)
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).quantile(0.5)
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
# import all the required files i.e. numpy , pandas and math library
from graphlib.financialGraph import Data
import numpy as np
import pandas as pd
from pandas import DataFrame , Series
import math
# All the indicators are defined and arranged in Alphabetical order
# ------------------> A <------------------------
# [0] __ Average True Range (ATR)
# Moving Average of True Range(TR)
def atr(data: DataFrame, period: int = 14) -> Series:
TR = tr(data)
return pd.Series(
TR.rolling(center=False, window=period,
min_periods=1).mean(),
name=f'{period} ATR'
)
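# NOTE: atr() above relies on a tr() helper (True Range) defined elsewhere in this module.
# The function below is only an illustrative sketch of the usual definition
# TR = max(high - low, |high - prev_close|, |low - prev_close|); it is not the project's tr().
def _true_range_sketch(data: DataFrame) -> Series:
    prev_close = data["close"].shift(1)
    ranges = pd.concat(
        [
            data["high"] - data["low"],
            (data["high"] - prev_close).abs(),
            (data["low"] - prev_close).abs(),
        ],
        axis=1,
    )
    return pd.Series(ranges.max(axis=1), name="TR")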
# [0] __ Adaptive Price Zone (APZ)
# TODO
def apz(data: DataFrame,period: int = 21,dev_factor: int = 2,
MA: Series = None,adjust: bool = True,) -> DataFrame:
if not isinstance(MA, pd.Series):
MA = dema(data, period)
price_range = pd.Series(
(data["high"] - data["low"]).ewm(span=period, adjust=adjust).mean()
)
volatility_value = pd.Series(
price_range.ewm(span=period, adjust=adjust).mean(), name="vol_val"
)
upper_band = pd.Series((volatility_value * dev_factor) + MA, name="UPPER")
lower_band = pd.Series(MA - (volatility_value * dev_factor), name="LOWER")
return pd.concat([upper_band, lower_band], axis=1)
# ------------------> B <------------------------
# [0] __ Bollinger Bands (BBANDS)
# TODO
def bbands(data: DataFrame,period: int = 20,MA: Series = None,
column: str = "close",std_multiplier: float = 2,) -> DataFrame:
std = data[column].rolling(window=period).std()
if not isinstance(MA, pd.core.series.Series):
middle_band = pd.Series(sma(data, period), name="BB_MIDDLE")
else:
middle_band = pd.Series(MA, name="BB_MIDDLE")
upper_bb = pd.Series(middle_band + (std_multiplier * std), name="BB_UPPER")
lower_bb = pd.Series(middle_band - (std_multiplier * std), name="BB_LOWER")
return pd.concat([upper_bb, middle_band, lower_bb], axis=1)
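# Illustrative usage (assuming an OHLC DataFrame `ohlc` with a 'close' column; not part of the library):
#   bbands(ohlc)                               # bands around the default 20-period SMA
#   bbands(ohlc, period=20, MA=dema(ohlc, 20)) # same bands centred on a DEMA instead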
# [0] __ Bollinger Bands Width (BBWidth)
# TODO
def bbwidth(
data: DataFrame, period: int = 20, MA: Series = None, column: str = "close"
) -> Series:
BB = bbands(data, period, MA, column)
return pd.Series(
(BB["BB_UPPER"] - BB["BB_LOWER"]) / BB["BB_MIDDLE"],
name="{0} period BBWITH".format(period),
)
# ------------------> D <------------------------
# [0] __ Double Exponential Moving Average (DEMA)
# 2 * EWMA - ewm(EWMA)
def dema(data : DataFrame,period: int = 10,column: str ='close',adjust: bool = True) -> Series:
DEMA = (
2*ema(data,period) - ema(data,period).ewm(span=period , adjust=adjust).mean()
)
return pd.Series(
DEMA ,
name = f'{period}_DEMA'
)
# [0] __ Directional Movement Index (DMI)
# TODO
def dmi(data: DataFrame, column: str = "close", adjust: bool = True) -> Series:
def _get_time(close):
sd = close.rolling(5).std()
asd = sd.rolling(10).mean()
v = sd / asd
t = 14 / v.round()
t[t.isna()] = 0
t = t.map(lambda x: int(min(max(x, 5), 30)))
return t
def _dmi(index):
time = t.iloc[index]
if (index - time) < 0:
subset = data.iloc[0:index]
else:
subset = data.iloc[(index - time) : index]
return rsi(subset, period=time, adjust=adjust).values[-1]
dates = Series(data.index)
periods = Series(range(14, len(dates)), index=dates.index[14:].values)
t = _get_time(data[column])
return periods.map(lambda x: _dmi(x))
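# Note on dmi(): despite the "Directional Movement Index" label above, the code computes a
# variable-period RSI -- the lookback is 14 divided by the ratio of the 5-bar std to its 10-bar
# mean, clamped to 5-30 bars -- which is closer to Chande's Dynamic Momentum Index than to
# Wilder's +DI/-DI/ADX.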
# ------------------> E <------------------------
# [0] __ Exponential Weighted Moving Average (EWMA) or Exponential Moving Average(EMA)
# Exponential average of prev n day prices
def ema(data : DataFrame,period: int = 10,column: str ='close',adjust: bool = True) -> Series:
return pd.Series(
data[column].ewm(span=period, adjust=adjust).mean(),
name = f'{period}_EMA'
)
# [0] __ Kaufman Efficiency indicator (KER) or (ER)
# change in price / volatility, where both the change and the volatility are taken as absolute values
def er(data : DataFrame,period: int = 10,column: str ='close') -> Series:
change = data[column].diff(period).abs()
volatility = data[column].diff().abs().rolling(window=period,min_periods=1).sum()
return pd.Series(change / volatility,
name=f'{period}_ER'
)
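# Quick sanity check (illustrative only): a perfectly trending close gives ER = 1 because the
# net change equals the summed path length, e.g.
#   toy = pd.DataFrame({'close': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]})
#   er(toy, period=10).iloc[-1]   # -> 1.0  (|11 - 1| = 10 and the sum of |diffs| is also 10)
# A choppy series gives a value closer to 0.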
# [0] __ EVSTC
# Double-smoothed stochastic oscillator computed on the EVWMA-based MACD (Schaff-Trend-Cycle style)
def evstc(data: DataFrame,period_fast: int = 12,period_slow: int = 30,
k_period: int = 10,d_period: int = 3,adjust: bool = True) -> Series:
ema_slow = evwma(data, period_slow)
ema_fast = evwma(data, period_fast)
macd = ema_fast - ema_slow
STOK = pd.Series((
(macd - macd.rolling(window=k_period).min())
/ (macd.rolling(window=k_period).max() - macd.rolling(window=k_period).min())
) * 100)
STOD = STOK.rolling(window=d_period).mean()
STOD_DoubleSmooth = STOD.rolling(window=d_period).mean()
return pd.Series(STOD_DoubleSmooth, name="{0} period EVSTC".format(k_period))
# [0] __ Elastic Volume Weighted Moving Average (EVWMA)
# x is ((volume sum for n period) - volume ) divided by (volume sum for n period)
# y is volume * close / (volume sum for n period)
def evwma(data, period: int = 20) -> Series:
vol_sum = (data["volume"].rolling(window=period,min_periods=1).sum())
x = (vol_sum - data["volume"]) / vol_sum
y = (data["volume"] * data["close"]) / vol_sum
evwma = [0]
for x, y in zip(x.fillna(0).iteritems(), y.iteritems()):
if x[1] == 0 or y[1] == 0:
evwma.append(0)
else:
evwma.append(evwma[-1] * x[1] + y[1])
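    # In effect: EVWMA_t = x_t * EVWMA_{t-1} + y_t, seeded with 0 (the seed is dropped below via
    # evwma[1:]); any bar where x or y equals 0 resets the average to 0.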
return pd.Series(
evwma[1:], index=data.index,
name=f'{period}_EVWMA'
)
# [0] __ Elastic Volume Weighted Moving average convergence divergence (EV_MACD)
# MACD calculation on basis of Elastic Volume Weighted Moving average (EVWMA)
def ev_macd(data: DataFrame,period_fast: int = 20,period_slow: int = 40,
signal: int = 9,adjust: bool = True,) -> DataFrame:
evwma_slow = evwma(data, period_slow)
evwma_fast = evwma(data, period_fast)
MACD = | pd.Series(evwma_fast - evwma_slow, name="EV MACD") | pandas.Series |
import logging
logger = logging.getLogger(__name__)
import os
import sys
import zipfile
import math
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import lxml.html as lh
from datetime import datetime as dt
def read_rnaseq_metrics(path):
try:
logger.debug("unzipping contents for {}".format(path))
zfile = zipfile.ZipFile(path)
try:
metrics_file = zfile.open('CollectRnaSeqMetrics.metrics.txt')
except KeyError:
metrics_file = zfile.open('RNA_Seq_Metrics_html.html')
return metrics_file.readlines()
except:
logger.warning("not a zip file; reading lines directly")
with open(path) as f:
return f.readlines()
def get_norm_cov(rnaseq_metrics_lines):
logger.debug("parsing normalized coverage histogram")
try:
logger.debug("attempting to parse histogram from "
"expected location (lines 11-112)")
cov_hist_lines = rnaseq_metrics_lines[11:112]
norm_cov = [float(line.rstrip('\n').split('\t')[-1])
for line in cov_hist_lines]
except ValueError:
try:
logger.warning("parsing failed; attempting to parse histogram from "
"alternative location (lines 31-132)")
cov_hist_lines = rnaseq_metrics_lines[31:132]
norm_cov = [float(re.search('[0-9]*\t[0-9]+(\.[0-9]+)*', line)
.group()
.split('\t')[-1])
for line in cov_hist_lines]
# THIS IS A HACK-Y WORKAROUND. NEED TO PARSE TABLE BETTER
except AttributeError:
try:
logger.warning("parsing failed; attempting to parse histogram from "
"alternative location (lines 30-131)")
cov_hist_lines = rnaseq_metrics_lines[30:131]
norm_cov = [float(re.search('[0-9]*\t[0-9]+(\.[0-9]+)*', line)
.group()
.split('\t')[-1])
for line in cov_hist_lines]
except AttributeError:
logger.warning("no coverage histogram found, returning empty list")
norm_cov = []
return norm_cov
# def scrape_norm_cov_table(path):
# lh.parse(path)
def build_norm_cov_df(metrics_path):
rnaseq_metrics_files = [os.path.join(metrics_path, f)
for f in os.listdir(metrics_path)
if re.search('_al.zip', f)
or re.search('rnaseq_metrics.html', f)]
rnaseq_metrics_files.sort()
logger.info("found Picard RNA-seq metrics files for {} samples"
.format(len(rnaseq_metrics_files)))
norm_cov_dict = {}
for f in rnaseq_metrics_files:
logger.debug("reading RNA-seq metrics from {}".format(f))
rnaseq_metrics_lines = read_rnaseq_metrics(f)
norm_cov = get_norm_cov(rnaseq_metrics_lines)
norm_cov = [0] * 101 if not len(norm_cov) else norm_cov
logger.debug("parsing filename to get sample ID")
lib = ('_').join(os.path.basename(f).split('_')[0:2])
norm_cov_dict[lib] = norm_cov
return | pd.DataFrame(data=norm_cov_dict) | pandas.DataFrame |
# This file is part of the
# Garpar Project (https://github.com/quatrope/garpar).
# Copyright (c) 2021, 2022, <NAME>, <NAME> and QuatroPe
# License: MIT
# Full Text: https://github.com/quatrope/garpar/blob/master/LICENSE
# =============================================================================
# IMPORTS
# =============================================================================
from io import BytesIO
from garpar.io import read_hdf5
from garpar.core.portfolio import GARPAR_METADATA_KEY, Metadata, Portfolio
import numpy as np
import pandas as pd
import pytest
# =============================================================================
# TESTS
# =============================================================================
def test_Portfolio_creation():
df = pd.DataFrame({"stock": [1, 2, 3, 4, 5]})
df.attrs[GARPAR_METADATA_KEY] = Metadata(
{
"entropy": 0.5,
"window_size": 5,
}
)
manual_pf = Portfolio(df=df.copy(), weights=[1.0])
mk_pf = Portfolio.from_dfkws(
df=df,
entropy=0.5,
window_size=5,
)
assert manual_pf == mk_pf
def test_Portfolio_copy_eq_ne():
pf = Portfolio.from_dfkws(
df=pd.DataFrame({"stock": [1, 2, 3, 4, 5]}),
entropy=0.5,
window_size=5,
)
copy = pf.copy()
assert pf == copy
assert pf is not copy
assert (
pf._df.attrs[GARPAR_METADATA_KEY]
== copy._df.attrs[GARPAR_METADATA_KEY]
)
assert (
pf._df.attrs[GARPAR_METADATA_KEY]
is not copy._df.attrs[GARPAR_METADATA_KEY]
)
other = Portfolio.from_dfkws(
df=pd.DataFrame({"stock": [1, 2, 3, 4, 5]}),
entropy=0.25,
window_size=5,
)
assert pf != other
def test_Portfolio_bad_metadata():
df = pd.DataFrame({"stock": [1, 2, 3, 4, 5]})
df.attrs[GARPAR_METADATA_KEY] = None
with pytest.raises(TypeError):
Portfolio(df)
def test_Portfolio_repr():
pf = Portfolio.from_dfkws(
df= | pd.DataFrame({"stock": [1, 2, 3, 4, 5]}) | pandas.DataFrame |
__author__ = "<NAME>, <NAME>, <NAME>, <NAME>"
__license__ = "MIT"
__version__ = "0.1"
#================================================================================
# modules
#================================================================================
import pandas as pd
import numpy as np
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.neural_network import MLPClassifier
from sklearn.datasets import make_classification
from sklearn import datasets
from sklearn import metrics
from sklearn.dummy import DummyClassifier
from sklearn import tree
#================================================================================
# properties
#================================================================================
dataPath = './data/'
def main():
fullDf = load_dataframe('DataSet-cleaned-binary.csv')
trainDf, testDf = splitDataFrame(fullDf, 90)
RunDummyClassifier(trainDf, testDf)
# RunDecisionTreeClassifier(trainDf, testDf)
# RunRandomForestClassifier(trainDf, testDf)
# RunExtraTreesClassifier(trainDf, testDf)
# RunAdaBoostClassifier(trainDf, testDf)
# RunBaggingClassifier(trainDf, testDf)
# RunGradientBoostingClassifier(trainDf, testDf)
# RunVotingClassifier(trainDf, testDf)
# RunKNeighborsClassifier(trainDf, testDf)
# RunMLPClassifier(trainDf, testDf)
return None
# ██████╗██╗ █████╗ ███████╗███████╗██╗███████╗██╗███████╗██████╗ ███████╗
# ██╔════╝██║ ██╔══██╗██╔════╝██╔════╝██║██╔════╝██║██╔════╝██╔══██╗██╔════╝
# ██║ ██║ ███████║███████╗███████╗██║█████╗ ██║█████╗ ██████╔╝███████╗
# ██║ ██║ ██╔══██║╚════██║╚════██║██║██╔══╝ ██║██╔══╝ ██╔══██╗╚════██║
# ╚██████╗███████╗██║ ██║███████║███████║██║██║ ██║███████╗██║ ██║███████║
# ╚═════╝╚══════╝╚═╝ ╚═╝╚══════╝╚══════╝╚═╝╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚══════╝
def trainAndRetrainClassifier(classifier, X, X_, y, y_, train_X, train_y, test_X):
"""trainAndRetrainClassifier
Trains a classifier a first time, predicts a target feature and train the classifier again thereafter.
Input:
classifier -- the classifier to use
X -- X split of training set
X_ -- X split of testing set
y -- y split of training set
y_ -- y split of testing set
train_X -- training set (sample)
train_y -- target feature
test_X -- testing set
Output:
y_test_c -- predicted values
"""
classifier.fit(X, y) # Training a first time
y_c = classifier.predict(X_) # Predicting (y_c represents the estimated targets as returned by the classifier)
evaluateModel(y_, y_c) # Evaluating model with validation set
classifier.fit(train_X, train_y) # Training again (with entire training set)
y_test_c = classifier.predict(test_X) # Predicting with test set
return y_test_c
def RunDummyClassifier(trainDf, testDf):
"""RunDummyClassifier
Runs a Dummy Classifier on training and testing dataframes.
Input:
trainDf -- the training DataFrame (pandas)
testDf -- the testing DataFrame (pandas)
"""
train_X, train_y, test_X = createArrays(trainDf, testDf)
# Split the training set into training and validation sets
X, X_, y, y_ = train_test_split(train_X, train_y, test_size=0.2)
dc = DummyClassifier(strategy='stratified', random_state=0)
y_test_dc = trainAndRetrainClassifier(dc, X, X_, y, y_, train_X, train_y, test_X)
savePredictions(y_test_dc, 'dc.csv')
def RunDecisionTreeClassifier(trainDf, testDf):
"""RunDecisionTreeClassifier
Runs a Decision Tree Classifier on training and testing dataframes.
Input:
trainDf -- the training DataFrame (pandas)
testDf -- the testing DataFrame (pandas)
"""
train_X, train_y, test_X = createArrays(trainDf, testDf)
# Split the training set into training and validation sets
X, X_, y, y_ = train_test_split(train_X, train_y, test_size=0.2)
dtc = tree.DecisionTreeClassifier(criterion='entropy')
y_test_dtc = trainAndRetrainClassifier(dtc, X, X_, y, y_, train_X, train_y, test_X)
savePredictions(y_test_dtc, 'dtc.csv')
def RunRandomForestClassifier(trainDf, testDf):
"""RunRandomForestClassifier
Runs a Random Forest Classifier on training and testing dataframes.
Input:
trainDf -- the training DataFrame (pandas)
testDf -- the testing DataFrame (pandas)
"""
train_X, train_y, test_X = createArrays(trainDf, testDf)
# Split the training set into training and validation sets
X, X_, y, y_ = train_test_split(train_X, train_y, test_size=0.2)
rfc = RandomForestClassifier(n_estimators=10, criterion='entropy', max_features=None, max_depth=None, verbose=True)
y_test_rfc = trainAndRetrainClassifier(rfc, X, X_, y, y_, train_X, train_y, test_X)
savePredictions(y_test_rfc, 'rfc.csv')
def RunExtraTreesClassifier(trainDf, testDf):
"""RunExtraTreesClassifier
Runs a Extra Trees Classifier on training and testing dataframes.
Input:
trainDf -- the training DataFrame (pandas)
testDf -- the testing DataFrame (pandas)
"""
train_X, train_y, test_X = createArrays(trainDf, testDf)
# Split the training set into training and validation sets
X, X_, y, y_ = train_test_split(train_X, train_y, test_size=0.2)
etc = ExtraTreesClassifier(n_estimators=10, max_depth=None, random_state=0, verbose=True)
y_test_etc = trainAndRetrainClassifier(etc, X, X_, y, y_, train_X, train_y, test_X)
savePredictions(y_test_etc, 'etc.csv')
def RunAdaBoostClassifier(trainDf, testDf):
"""RunAdaBoostClassifier
Runs an Ada Boost Classifier on training and testing dataframes.
Input:
trainDf -- the training DataFrame (pandas)
testDf -- the testing DataFrame (pandas)
"""
train_X, train_y, test_X = createArrays(trainDf, testDf)
# Split the training set into training and validation sets
X, X_, y, y_ = train_test_split(train_X, train_y, test_size=0.2)
abc = AdaBoostClassifier(n_estimators=10)
y_test_abc = trainAndRetrainClassifier(abc, X, X_, y, y_, train_X, train_y, test_X)
savePredictions(y_test_abc, 'abc.csv')
def RunBaggingClassifier(trainDf, testDf):
"""RunBaggingClassifier
Runs a Bagging Classifier on training and testing dataframes.
Input:
trainDf -- the training DataFrame (pandas)
testDf -- the testing DataFrame (pandas)
"""
train_X, train_y, test_X = createArrays(trainDf, testDf)
# Split the training set into training and validation sets
X, X_, y, y_ = train_test_split(train_X, train_y, test_size=0.2)
bc = BaggingClassifier(KNeighborsClassifier(), max_samples=0.8, max_features=0.8, n_estimators=10)
y_test_bc = trainAndRetrainClassifier(bc, X, X_, y, y_, train_X, train_y, test_X)
savePredictions(y_test_bc, 'bc.csv')
def RunGradientBoostingClassifier(trainDf, testDf):
"""RunGradientBoostingClassifier
Runs a Gradient Boosting Classifier on training and testing dataframes.
Input:
trainDf -- the training DataFrame (pandas)
testDf -- the testing DataFrame (pandas)
"""
train_X, train_y, test_X = createArrays(trainDf, testDf)
# Split the training set into training and validation sets
X, X_, y, y_ = train_test_split(train_X, train_y, test_size=0.2)
gbc = GradientBoostingClassifier(n_estimators=10, learning_rate=1.0, max_depth=1, random_state=0)
y_test_gbc = trainAndRetrainClassifier(gbc, X, X_, y, y_, train_X, train_y, test_X)
savePredictions(y_test_gbc, 'bc.csv')
def RunVotingClassifier(trainDf, testDf):
"""RunVotingClassifier
Runs a Voting Classifier on training and testing dataframes.
Input:
trainDf -- the training DataFrame (pandas)
testDf -- the testing DataFrame (pandas)
"""
train_X, train_y, test_X = createArrays(trainDf, testDf)
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
for clf, label in zip([clf1, clf2, clf3, eclf], ['Logistic Regression', 'Random Forest', 'naive Bayes', 'Ensemble']):
    scores = cross_val_score(clf, train_X, train_y, cv=5, scoring='accuracy')
    print("Accuracy: %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), label))
def RunKNeighborsClassifier(trainDf, testDf):
"""RunKNeighborsClassifier
Runs a K-Neighbors Classifier on training and testing dataframes.
Input:
trainDf -- the training DataFrame (pandas)
testDf -- the testing DataFrame (pandas)
"""
train_X, train_y, test_X = createArrays(trainDf, testDf)
# Split the training set into training and validation sets
X, X_, y, y_ = train_test_split(train_X, train_y, test_size=0.2)
knc = KNeighborsClassifier(n_neighbors=1, weights='distance')
y_test_knc = trainAndRetrainClassifier(knc, X, X_, y, y_, train_X, train_y, test_X)
savePredictions(y_test_knc, 'knc.csv')
# ███╗ ██╗███████╗██╗ ██╗██████╗ █████╗ ██╗
# ████╗ ██║██╔════╝██║ ██║██╔══██╗██╔══██╗██║
# ██╔██╗ ██║█████╗ ██║ ██║██████╔╝███████║██║
# ██║╚██╗██║██╔══╝ ██║ ██║██╔══██╗██╔══██║██║
# ██║ ╚████║███████╗╚██████╔╝██║ ██║██║ ██║███████╗
# ╚═╝ ╚═══╝╚══════╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚══════╝
def RunMLPClassifier(trainDf, testDf):
"""RunMLPClassifier
Runs a Neural Network based on a MLP Classifier on training and testing dataframes.
Input:
trainDf -- the training DataFrame (pandas)
testDf -- the testing DataFrame (pandas)
"""
train_X, train_y, test_X = createArrays(trainDf, testDf)
# Split the training set into training and validation sets
X, X_, y, y_ = train_test_split(train_X, train_y, test_size=0.2)
mlpc = MLPClassifier(hidden_layer_sizes=(56, 56, 56), verbose=True)
mlpc.fit(X, y) # Train
y_mlpc = mlpc.predict(X_) # Predict / y_rf represents the estimated targets as returned by our classifier
evaluateModel(y_, y_mlpc) # Evaluating model with validation set
mlpc.fit(train_X, train_y) # Retrain with entire training set
y_test_mlpc = mlpc.predict(test_X) # Predict with test set
# ██╗███╗ ██╗██████╗ ██╗ ██╗████████╗ ██████╗ ██╗ ██╗████████╗██████╗ ██╗ ██╗████████╗
# ██║████╗ ██║██╔══██╗██║ ██║╚══██╔══╝ ██╔═══██╗██║ ██║╚══██╔══╝██╔══██╗██║ ██║╚══██╔══╝
# ██║██╔██╗ ██║██████╔╝██║ ██║ ██║█████╗██║ ██║██║ ██║ ██║ ██████╔╝██║ ██║ ██║
# ██║██║╚██╗██║██╔═══╝ ██║ ██║ ██║╚════╝██║ ██║██║ ██║ ██║ ██╔═══╝ ██║ ██║ ██║
# ██║██║ ╚████║██║ ╚██████╔╝ ██║ ╚██████╔╝╚██████╔╝ ██║ ██║ ╚██████╔╝ ██║
# ╚═╝╚═╝ ╚═══╝╚═╝ ╚═════╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝
def load_dataframe(fileName):
"""load_dataframe
Loads a DataFrame from a .csv file.
Input:
fileName -- the name of the file (should be located in ./data/)
Output:
pd.read_csv() -- the DataFrame loaded by Pandas
"""
path = dataPath + fileName
return pd.read_csv(path, header=0)
def write_dataframe(df, fileName):
"""write_dataframe
Writes a DataFrame into a .csv file.
Input:
df -- the DataFrame to write
fileName -- the name of the file (will be saved in ./data/)
"""
path = dataPath + fileName
df.to_csv(path)
def savePredictions(predictions, fileName):
"""savePredictions
Saves predictions into a .csv file.
Input:
predictions -- predictions to save
fileName -- the name of the file (will be saved in ./data/predictions/)
"""
| pd.DataFrame({'Cover_Type': predictions}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import argparse
import pandas as pd
from zvt import init_log, zvt_env
from zvt.api.quote import get_stock_factor_schema
from zvt.contract import IntervalLevel
from zvt.contract.api import df_to_db
from zvt.contract.recorder import FixedCycleDataRecorder
from zvt.recorders.joinquant.common import to_jq_trading_level, to_jq_entity_id
from zvt.domain import Stock,StockFactorCommon
from zvt.utils.pd_utils import pd_is_not_null
from zvt.utils.time_utils import to_time_str, now_pd_timestamp, TIME_FORMAT_DAY, TIME_FORMAT_ISO8601
try:
from jqdatasdk import auth, logout, get_factor_values
except:
pass
class JqChinaStockFactorRecorder(FixedCycleDataRecorder):
entity_provider = 'joinquant'
entity_schema = Stock
data_schema = StockFactorCommon
# 数据来自jq
provider = 'joinquant'
def __init__(self,
exchanges=['sh', 'sz'],
schema=None,
entity_ids=None,
codes=None,
batch_size=10,
force_update=True,
sleeping_time=0,
default_size=2000,
real_time=False,
fix_duplicate_way='ignore',
start_timestamp=None,
end_timestamp=None,
level=IntervalLevel.LEVEL_1WEEK,
kdata_use_begin_time=False,
close_hour=15,
close_minute=0,
one_day_trading_minutes=4 * 60,
) -> None:
level = IntervalLevel(level)
self.data_schema = get_stock_factor_schema(schema)
self.jq_trading_level = to_jq_trading_level(level)
super().__init__('stock', exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
close_minute, level, kdata_use_begin_time, one_day_trading_minutes)
auth(zvt_env['jq_username'], zvt_env['jq_password'])
def on_finish(self):
super().on_finish()
logout()
def record(self, entity, start, end, size, timestamps):
now_date = to_time_str(now_pd_timestamp())
jq_entity_di = to_jq_entity_id(entity)
if size > 1000:
start_end_size = self.evaluate_start_end_size_timestamps(entity)
size = 1000
bdate= pd.bdate_range(start=start_end_size[0], periods=size)
self.start_timestamp = bdate[0]
self.end_timestamp = bdate[-1] if bdate[-1] <= now_pd_timestamp() else now_pd_timestamp()
if not self.end_timestamp:
factor_data = get_factor_values(securities=[jq_entity_di],
factors=self.data_schema.important_cols(),
end_date=now_date,
count=size)
else:
end_timestamp = to_time_str(self.end_timestamp)
if self.start_timestamp:
start_timestamp = to_time_str(self.start_timestamp)
else:
bdate_list = pd.bdate_range(end=end_timestamp, periods=size)
start_timestamp = to_time_str(bdate_list[0])
factor_data = get_factor_values(securities=[to_jq_entity_id(entity)],
factors=self.data_schema.important_cols(),
start_date=start_timestamp,
end_date=end_timestamp)
df_list = [values.rename(columns={jq_entity_di: key}) for key, values in factor_data.items()]
if len(df_list) != 0:
df = pd.concat(df_list,join='inner',sort=True,axis=1).sort_index(ascending=True)
else:
df = pd.DataFrame(columns=self.data_schema.important_cols(),index=pd.bdate_range(start=start_timestamp,end=end_timestamp))
if pd_is_not_null(df):
df_fill = pd.DataFrame(index=pd.bdate_range(start=start_timestamp, end=end_timestamp)) if self.end_timestamp else | pd.DataFrame(index=df.index) | pandas.DataFrame |
import numpy as np
import pandas as pd
from gmpy2 import bit_mask
from rulelist.rulelistmodel.gaussianmodel.gaussiantarget import GaussianTargets
class TestGaussianTargets(object):
def test_onetarget(self):
dictoutput = {"target1": np.arange(100)}
input_target_data = pd.DataFrame(data=dictoutput)
expected_number_targets = 1
expected_bit_array = bit_mask(100)
expected_mean_vector = np.array([49.5])
expected_variance_vector = np.var([*range(100)])
output_gaussiantargets= GaussianTargets(input_target_data)
assert expected_bit_array == output_gaussiantargets.bit_array
assert expected_number_targets == len(output_gaussiantargets.mean)
assert expected_number_targets == len(output_gaussiantargets.variance)
np.testing.assert_array_equal(expected_mean_vector, output_gaussiantargets.mean)
np.testing.assert_array_equal(expected_variance_vector, output_gaussiantargets.variance)
def test_twotargets(self):
dictoutput = {"target1": np.arange(100), "target2": np.ones(100)}
input_target_data = pd.DataFrame(data=dictoutput)
expected_number_targets = 2
expected_bit_array = bit_mask(100)
expected_mean_vector = np.array([49.5,1])
expected_variance_vector = [np.var([*range(100)]),0]
output_gaussiantargets= GaussianTargets(input_target_data)
assert expected_bit_array == output_gaussiantargets.bit_array
assert expected_number_targets == len(output_gaussiantargets.mean)
assert expected_number_targets == len(output_gaussiantargets.variance)
np.testing.assert_array_equal(expected_mean_vector, output_gaussiantargets.mean)
np.testing.assert_array_equal(expected_variance_vector, output_gaussiantargets.variance)
def test_onlyzeros(self):
dictoutput = {"target1": np.zeros(100)}
input_target_data = | pd.DataFrame(data=dictoutput) | pandas.DataFrame |
import matplotlib
import matplotlib.pyplot as plt
from time import sleep
from subprocess import Popen, PIPE, STDOUT
from IPython.display import clear_output, display
import urllib.parse
from io import BytesIO
from zipfile import ZipFile
import urllib.request
import datetime
import os
import pandas
import numpy
from IPython.display import display, HTML
def readDataAndGenerateCSV(scenario):
if scenario == 1 or scenario == 2:
zNormalized = 1
if scenario == 3:
zNormalized = 0
p = Popen(['java', '-jar', 'StockDataGenerateCSV-1.0-jar-with-dependencies.jar', 'properties_scenario' + str(scenario) + '.conf'], stdout=PIPE, stderr=STDOUT)
for line in p.stdout:
if line[2] == 115:
print(str(line)[2:-3])
sleep(2)
if line[2] == 101:
clear_output(wait=True)
print(str(line)[2:-3])
else:
print(str(line)[2:-3])
if zNormalized == 1:
stocks_file = open("stockTS.csvzNorm.csv")
else:
stocks_file = open("stockTS.csv")
stocks_lines = stocks_file.readlines()
stocks = {}
for s in stocks_lines:
stock_name = s.split(",")[0]
stock_values_str = s.split("\n")[0].split(",")[3:264]
stock_values = []
for st in stock_values_str:
stock_values.append(float(st))
stocks[stock_name] = stock_values
return stocks
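# The dict returned above maps each stock name to its list of 261 daily close values
# (CSV columns 3..263, i.e. one trading year); e.g. stocks["SOME_TICKER"] -> [12.3, 12.5, ...]
# ("SOME_TICKER" is illustrative -- the real keys come from the first CSV column).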
def discoverBundles(scenario):
if scenario > 0:
p = Popen(['java', '-jar', 'BundleDiscovery-1.0-jar-with-dependencies.jar', 'properties_scenario' + str(scenario) + '.conf'], stdout=PIPE, stderr=STDOUT)
for line in p.stdout:
if line[2] == 115:
print(str(line)[2:-3])
sleep(2)
if line[2] == 101:
clear_output(wait=True)
print(str(line)[2:-3])
else:
print(str(line)[2:-3])
else:
p = Popen(['java', '-jar', './GET_DB_DATA.jar'], stdout=PIPE, stderr=STDOUT)
for line in p.stdout:
if line[2] == 115:
print(str(line)[2:-3])
sleep(2)
if line[2] == 101:
clear_output(wait=True)
print(str(line)[2:-3])
else:
print(str(line)[2:-3])
def plotAllData(stock_market, year, stocks):
xlim_1 = 0
xlim_2 = 261
plt.rcParams['figure.figsize'] = [28, 14]
plt.title('Stocks of ' + stock_market + '\'s Stock Market', fontsize=35)
plt.ylabel('Close Value', fontsize=35)
plt.xlabel('Working Day of Year ' + year, fontsize=35)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
axes = plt.gca()
axes.set_xlim([xlim_1, xlim_2])
plt.grid(True)
for k, v in stocks.items():
plt.plot(v)
#return bundles_members, bundles_duration
def plotAllBundles(stock_market, year, stocks):
results_file = open("results.txt","r")
results_lines = results_file.readlines()
xlim_1 = 0
xlim_2 = 261
bundles_members = {}
bundles_duration = {}
for b in results_lines:
bundle_name = b.split(";")[0]
duration = b.split(";")[2]
#members = b.split("\n")[0].split(":")[1].split(",")
members = b.split(";")[1].split(",")
bundles_members[bundle_name] = members
bundles_duration[bundle_name] = duration
for k, v in bundles_members.items():
duration = bundles_duration[k]
x1 = int(duration.split("-")[0][1:])
x2 = int(duration.split("-")[1][:-2])
minimum = []
maximum = []
for i in list(range(x2-x1)):
minimum.append(10000000000)
maximum.append(-1)
plt.rcParams['figure.figsize'] = [28, 14]
plt.title('Discovered Bundles of Stocks (' + stock_market + '\'s Stock Market)', fontsize=35)
plt.ylabel('Close Value', fontsize=35)
plt.xlabel('Working Day of Year ' + year, fontsize=35)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
axes = plt.gca()
axes.set_xlim([xlim_1, xlim_2])
for member in bundles_members[k]:
ts = stocks[member]
idx = 0
for t in list(range(x1, x2)):
if ts[t] < minimum[idx]:
minimum[idx] = ts[t]
if ts[t] > maximum[idx]:
maximum[idx] = ts[t]
idx += 1
p = plt.plot(list(range(x1, x2)), maximum, linewidth=5.0)
plt.plot(list(range(x1, x2)), minimum, color=p[0].get_color(), linewidth=5.0)
plt.grid(True)
plt.fill_between(list(range(x1, x2)), minimum, maximum, color=p[0].get_color(), alpha=0.15)
def plotSelectedBundle(scenario, bundle_to_visualize, stock_market, year, stocks):
results_file = open("results.txt","r")
results_lines = results_file.readlines()
xlim_1 = 0
xlim_2 = 261
bundles_members = {}
bundles_duration = {}
for b in results_lines:
bundle_name = b.split(";")[0]
duration = b.split(";")[2]
#members = b.split("\n")[0].split(":")[1].split(",")
members = b.split(";")[1].split(",")
bundles_members[bundle_name] = members
bundles_duration[bundle_name] = duration
if bundle_to_visualize == -1:
if scenario == 1 or scenario == 2:
bundle_to_visualize = 'Bundle_0'
if scenario == 3:
bundle_to_visualize = 'Bundle_100'
else:
bundle_to_visualize = bundle_to_visualize
duration = bundles_duration[bundle_to_visualize]
x1 = int(duration.split("-")[0][1:])
x2 = int(duration.split("-")[1][:-2])
minimum = []
maximum = []
for i in list(range(x2-x1)):
minimum.append(10000000000)
maximum.append(-1)
plt.rcParams['figure.figsize'] = [28, 14]
plt.title('Discovered Bundles of ' + bundle_to_visualize + ' (' + stock_market + '\'s Stock Market)', fontsize=35)
plt.ylabel('Close Value', fontsize=35)
plt.xlabel('Working Day of Year ' + year, fontsize=35)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
axes = plt.gca()
axes.set_xlim([xlim_1, xlim_2])
print('BUNDLE MEMBERS:')
for member in bundles_members[bundle_to_visualize]:
ts = stocks[member]
idx = 0
for t in list(range(x1, x2)):
if ts[t] < minimum[idx]:
minimum[idx] = ts[t]
if ts[t] > maximum[idx]:
maximum[idx] = ts[t]
idx += 1
plt.plot(ts)
print(member)
plt.axvline(x = x1)
plt.axvline(x = x2)
plt.plot(list(range(x1, x2)), maximum, color='#539ecd', linewidth=5.0)
plt.plot(list(range(x1, x2)), minimum, color='#539ecd', linewidth=5.0)
plt.grid(True)
plt.fill_between(list(range(x1, x2)), minimum, maximum, color='#539ecd', alpha=0.25)
def segmentData(year, interval, symbol, zNormalize):
if os.path.exists("singleStockTS.csv"):
os.remove("singleStockTS.csv")
count1 = 1
count2 = 0
string_to_write = ""
prev_day_of_week = ""  # initialise before the loops so the first "Fri" comparison below cannot raise NameError
for i in range(1,12):
month = str(i)
if i < 10:
month = "0" + month
link = "http://5.175.24.176/Qualimaster/history/" + str(interval) + "/" + year + month + "_" + symbol + ".zip"
link = urllib.parse.urlsplit(link)
link = list(link)
link[2] = urllib.parse.quote(link[2])
link = urllib.parse.urlunsplit(link)
url = urllib.request.urlopen(link)
with ZipFile(BytesIO(url.read())) as my_zip_file:
for contained_file in my_zip_file.namelist():
for line in my_zip_file.open(contained_file).readlines():
line = line.decode("utf-8")
date = line.split(",")[0]
day_of_week = datetime.datetime.strptime(date, '%m/%d/%Y').strftime('%a')
if day_of_week == "Mon" and prev_day_of_week == "Fri":
if count2 == 45:
with open('singleStockTS.csv', 'a') as the_file:
the_file.write(string_to_write + "\n")
count1 += 1
string_to_write = "Week_" + str(count1) + ",X,Y"
count2 = 0
if count1>1 and count1<52:
string_to_write += "," + str(float(line.split(",")[5]))
count2 += 1
prev_day_of_week = day_of_week
p = Popen(['java', '-jar', 'StockDataGenerateCSV-1.0-jar-with-dependencies.jar', str(zNormalize), 'singleStockTS.csv', str(45)], stdout=PIPE, stderr=STDOUT)
for line in p.stdout:
if line[2] == 115:
print(str(line)[2:-3])
sleep(2)
if line[2] == 101:
clear_output(wait=True)
print(str(line)[2:-3])
else:
print(str(line)[2:-3])
if zNormalize == 1:
stocks_file = open("singleStockTS.csvzNorm.csv")
else:
stocks_file = open("singleStockTS.csv")
stocks_lines = stocks_file.readlines()
stocks = {}
for s in stocks_lines:
stock_name = s.split(",")[0]
stock_values_str = s.split("\n")[0].split(",")[3:264]
stock_values = []
for st in stock_values_str:
stock_values.append(float(st))
stocks[stock_name] = stock_values
return stocks
def plotAllData2(symbol, year, stocks):
xlim_1 = 0
xlim_2 = 44
plt.rcParams['figure.figsize'] = [28, 14]
plt.title(symbol, fontsize=35)
plt.ylabel('Close Value', fontsize=35)
plt.xlabel('Working Hour of Week for Year ' + year, fontsize=35)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
axes = plt.gca()
axes.set_xlim([xlim_1, xlim_2])
plt.grid(True)
for k, v in stocks.items():
plt.plot(v)
def plotAllBundles2(symbol, year, stocks):
results_file = open("results.txt","r")
results_lines = results_file.readlines()
xlim_1 = 0
xlim_2 = 44
bundles_members = {}
bundles_duration = {}
for b in results_lines:
bundle_name = b.split(";")[0]
duration = b.split(";")[2]
#members = b.split("\n")[0].split(":")[1].split(",")
members = b.split(";")[1].split(",")
bundles_members[bundle_name] = members
bundles_duration[bundle_name] = duration
for k, v in bundles_members.items():
duration = bundles_duration[k]
x1 = int(duration.split("-")[0][1:])
x2 = int(duration.split("-")[1][:-2])
minimum = []
maximum = []
for i in list(range(x2-x1)):
minimum.append(10000000000)
maximum.append(-1)
plt.rcParams['figure.figsize'] = [28, 14]
plt.title('Discovered Bundles of ' + symbol, fontsize=35)
plt.ylabel('Close Value', fontsize=35)
plt.xlabel('Working Hour of Week for Year ' + year, fontsize=35)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
axes = plt.gca()
axes.set_xlim([xlim_1, xlim_2])
for member in bundles_members[k]:
ts = stocks[member]
idx = 0
for t in list(range(x1, x2)):
if ts[t] < minimum[idx]:
minimum[idx] = ts[t]
if ts[t] > maximum[idx]:
maximum[idx] = ts[t]
idx += 1
p = plt.plot(list(range(x1, x2)), maximum, linewidth=5.0)
plt.plot(list(range(x1, x2)), minimum, color=p[0].get_color(), linewidth=5.0)
plt.grid(True)
plt.fill_between(list(range(x1, x2)), minimum, maximum, color=p[0].get_color(), alpha=0.15)
def plotSelectedBundle2(scenario, bundle_to_visualize, symbol, year, stocks):
results_file = open("results.txt","r")
results_lines = results_file.readlines()
xlim_1 = 0
xlim_2 = 44
bundles_members = {}
bundles_duration = {}
for b in results_lines:
bundle_name = b.split(";")[0]
duration = b.split(";")[2]
#members = b.split("\n")[0].split(":")[1].split(",")
members = b.split(";")[1].split(",")
bundles_members[bundle_name] = members
bundles_duration[bundle_name] = duration
duration = bundles_duration[bundle_to_visualize]
x1 = int(duration.split("-")[0][1:])
x2 = int(duration.split("-")[1][:-2])
minimum = []
maximum = []
for i in list(range(x2-x1)):
minimum.append(10000000000)
maximum.append(-1)
plt.rcParams['figure.figsize'] = [28, 14]
plt.title('Discovered Bundles of ' + bundle_to_visualize, fontsize=35)
plt.ylabel('Close Value', fontsize=35)
plt.xlabel('Working Hour of Week for Year ' + year, fontsize=35)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
axes = plt.gca()
axes.set_xlim([xlim_1, xlim_2])
print('BUNDLE MEMBERS:')
for member in bundles_members[bundle_to_visualize]:
ts = stocks[member]
idx = 0
for t in list(range(x1, x2)):
if ts[t] < minimum[idx]:
minimum[idx] = ts[t]
if ts[t] > maximum[idx]:
maximum[idx] = ts[t]
idx += 1
plt.plot(ts)
print(member)
plt.axvline(x = x1)
plt.axvline(x = x2)
plt.plot(list(range(x1, x2)), maximum, color='#539ecd', linewidth=5.0)
plt.plot(list(range(x1, x2)), minimum, color='#539ecd', linewidth=5.0)
plt.grid(True)
plt.fill_between(list(range(x1, x2)), minimum, maximum, color='#539ecd', alpha=0.25)
def getSimilarBundles(sort_by):
pandas.set_option('display.max_colwidth', -1)
p = Popen(['java', '-jar', 'simjoin-0.0.1-SNAPSHOT-jar-with-dependencies.jar', 'ssjoin_config.properties'], stdout=PIPE, stderr=STDOUT)
f = open('ssjoin_out.txt', "r")
lines_ssjoin = f.readlines()
f.close()
f = open('results.txt', "r")
lines_bundles = f.readlines()
f.close()
first_time = True
for line in lines_ssjoin:
res_list = []
bundle1 = lines_bundles[int(line.split(",")[0])]
bundle1_name = bundle1.split(";")[0]
bundle1_members = bundle1.split(";")[1]
bundle1_interval_start = int(bundle1.split(";")[2].split("-")[0][1:])
bundle1_interval_end = int(bundle1.split(";")[2].split("-")[1][:-2])
bundle2 = lines_bundles[int(line.split(",")[1])]
bundle2_name = bundle2.split(";")[0]
bundle2_members = bundle2.split(";")[1]
bundle2_interval_start = int(bundle2.split(";")[2].split("-")[0][1:])
bundle2_interval_end = int(bundle2.split(";")[2].split("-")[1][:-2])
res_list.append(bundle1_name + " " + bundle2_name)
similarity = float(line.split(",")[2].split("\n")[0][:-1])
res_list.append(similarity)
x = range(bundle1_interval_start, bundle1_interval_end+1)
x_len = bundle1_interval_end-bundle1_interval_start
y = range(bundle2_interval_start, bundle2_interval_end+1)
y_len = bundle2_interval_end-bundle2_interval_start
xs = set(x)
intersect_length = len(xs.intersection(y))
interval_similarity = 0
if x_len > y_len:
interval_similarity = intersect_length/x_len
else:
interval_similarity = intersect_length/y_len
res_list.append(interval_similarity)
bundle1_set = set(''.join(bundle1_members).split(","))
bundle2_set = set(''.join(bundle2_members).split(","))
common = bundle1_set.intersection(bundle2_set)
res_list.append(common)
not_common = bundle1_set.symmetric_difference(bundle2_set)
res_list.append(not_common)
if first_time == True:
newArray = numpy.array(res_list)
first_time = False
else:
newArray = numpy.vstack([newArray, res_list])
field_list = ["Bundle Pair", "Member Similarity", "Interval Similarity", "Common Members", "Non-Common Members"]
members_incr = range(1, len(newArray)+1)
display( | pandas.DataFrame(newArray, members_incr, field_list) | pandas.DataFrame |
#!/Users/lindsaychuang/miniconda3/envs/obspy/bin/python
import pandas as pd
from obspy import UTCDateTime, read, Stream, Trace
import dash_html_components as html
import plotly.graph_objects as go
def wrap_wfs(st):
fig = go.Figure()
for i in range(0, len(st)):
fig.add_trace(
go.Scattergl(
x=st[i].times(),
y=st[i].data*0.85 + i + 1,
name=f'{st[i].stats["station"]}-{st[i].stats["channel"]}')
)
return fig
def norm_wfs(st, norm):
# ---- normalize
if norm == "Normalize":
st.normalize(global_max=False)
elif norm == "Original":
st = st
return st
def filters(st, type, lowf, highf):
if type == "raw":
st = st
elif type == "bandpass":
st.taper(max_percentage=0.05)
st.filter('bandpass', freqmin=lowf, freqmax=highf)
elif type == "lowpass":
st.taper(max_percentage=0.05)
st.filter('lowpass', freq=lowf)
elif type == "highpass":
st.taper(max_percentage=0.05)
st.filter('highpass', freq=highf)
else:
raise Exception("Unexpected filter type")
return st
def load_wfs(path, stas, btime, etime, comps):
allwfs = Stream()
for sta in stas:
if comps == 'N' or comps == 'E' or comps == 'Z':
try:
st = read(f'{path}/*{sta}*.*{comps}.*', starttime=btime, endtime=etime)
allwfs += st
except:
print(f'can not find station: {sta} component: {comps}')
elif comps == 'All':
st = read(f'{path}/*{sta}*', starttime=btime, endtime=etime)
allwfs += st
else:
raise Exception("Unexpected components")
return allwfs
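# Minimal end-to-end sketch chaining the helpers above (path, station list and time window are
# whatever the caller supplies -- nothing here is a project default): load vertical components,
# band-pass filter, normalize, and build the plotly figure.
def example_waveform_pipeline(path, stas, btime, etime):
    st = load_wfs(path, stas, btime, etime, 'Z')         # read only Z components
    st = filters(st, 'bandpass', lowf=1.0, highf=10.0)   # taper + 1-10 Hz band-pass (example corners)
    st = norm_wfs(st, 'Normalize')                       # per-trace normalization
    return wrap_wfs(st)                                  # one offset trace per station/channel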
def wrap_map_object_eq(path_earthquake_cata,pd_cata, map_data):
pd_cata = pd.read_csv(f'{path_earthquake_cata}/{pd_cata[0]}')
map_data[0]["lat"] = pd_cata.evla.values
map_data[0]["lon"] = pd_cata.evlo.values
map_data[0]['marker']['size'] = pd_cata.mag.values
map_data[0]["text"] = pd_cata.time.values
map_data[0]['marker']['color'] = pd_cata.evdp.values
return map_data
def wrap_map_object_st(path_station_cata,st_cata, map_data):
st_cata = pd.read_csv(f'{path_station_cata}/{st_cata[0]}')
map_data[1]["lat"] = st_cata.stla.values
map_data[1]["lon"] = st_cata.stlo.values
map_data[1]["text"] = st_cata.station.values
return map_data
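# Both wrappers above assume map_data is a list of plotly-style trace dicts: index 0 is the
# earthquake layer and index 1 the station layer, each carrying 'lat', 'lon', 'text' and a
# 'marker' dict (the earthquake marker additionally uses 'size' and 'color').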
def quick_analysis_eq_catalog(path,files):
pd_cata = pd.read_csv(f'{path}/{files[0]}')
pd_datetime = pd.to_datetime(pd_cata[["year", "month", "day", "hour", "minute", "second"]])
earlist = pd_datetime.min()
latest = pd_datetime.max()
total_event_number = len(pd_cata)
return earlist, latest, total_event_number
def quick_analysis_phase_catalog(path,files):
pd_cata = | pd.read_csv(f'{path}/{files[0]}') | pandas.read_csv |
#!/usr/bin/python3
# -----------------------------------------------------------
# Calculator to add countdown features to the dataset
# -----------------------------------------------------------
import calendar
import datetime
from enum import Enum
import dateutil
import lunardate
import convertdate
import pandas as pd
from pandas.api.types import is_datetime64_any_dtype as is_datetime
class Events(Enum):
"""
A class used to represent all Events as Enum
"""
EPIPHANIE = "epiphanie"
CHANDELEUR = "chandeleur"
MARDI_GRAS = "mardi_gras"
HALLOWEEN = "halloween"
NOUVEL_AN_CHINOIS = "nouvel_an_chinois"
AID = "aid"
RAMADAN = "ramadan"
def compute_last_weekday_of_a_month(year, month, weekday):
"""
given a year, month and weekday (1 = monday, 2 = tuesday, ..., 7 = sunday)
returns the last occurence of this weekday in this month of this year as a date
"""
last_day = max(
week[-1 + weekday] for week in calendar.monthcalendar(year, month)
)
return datetime.date(year, month, last_day)
def compute_first_weekday_of_a_month(year, month, weekday):
"""
given a year, month and weekday (1 = monday, 2 = tuesday, ..., 7 = sunday)
returns the first occurence of this weekday in this month of this year as a date
"""
first_day = min(
week[-1 + weekday] for week in calendar.monthcalendar(year, month) if week[-1 + weekday] > 0
)
return datetime.date(year, month, first_day)
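# Worked example (weekday 1 = Monday): in May 2021 the first Monday is the 3rd and the last the 31st, so
#   compute_first_weekday_of_a_month(2021, 5, 1) -> datetime.date(2021, 5, 3)
#   compute_last_weekday_of_a_month(2021, 5, 1)  -> datetime.date(2021, 5, 31)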
def compute_events_dates(years):
"""
For a given list of years compute a dict of events with a list of dates for each event
"""
events_dates_by_event = {}
for event in Events:
events_dates_by_event[event] = []
for year in years:
# epiphanie
events_dates_by_event[Events.EPIPHANIE].append(datetime.date(year, 1, 6))
# chandeleur
events_dates_by_event[Events.CHANDELEUR].append(datetime.date(year, 2, 2))
# date_easter = easter(year)
date_easter = dateutil.easter.easter(year, 3)
# mardi_gras
events_dates_by_event[Events.MARDI_GRAS].append(date_easter - datetime.timedelta(days=47))
# Halloween : 31/10
events_dates_by_event[Events.HALLOWEEN].append(datetime.date(year, 10, 31))
# Ramadan
islamic_year = convertdate.islamic.from_gregorian(year, 1, 1)[0]
ramadan = datetime.datetime(
convertdate.islamic.to_gregorian(islamic_year, 9, 1)[0],
convertdate.islamic.to_gregorian(islamic_year, 9, 1)[1],
convertdate.islamic.to_gregorian(islamic_year, 9, 1)[2],
)
events_dates_by_event[Events.RAMADAN].append(ramadan.date())
# aid
events_dates_by_event[Events.AID].append((ramadan + datetime.timedelta(days=30)).date())
# chinese new year
events_dates_by_event[Events.NOUVEL_AN_CHINOIS].append(lunardate.LunarDate(year, 1, 1, 0).toSolarDate())
return events_dates_by_event
def add_countdown_ago(date, event, events_dates_per_event):
"""
Given a date and an even,
compute the number of days since the previous occurence of this events within events_dates_per_event
"""
countdown = []
for special_date in events_dates_per_event[event]:
date_count_down = (special_date - date).days
if date_count_down <= 0:
countdown.append(date_count_down)
return -1 * max(countdown)
def add_countdown_in(date, event, events_dates_per_event):
"""
Given a date and an even,
compute the number of days until the next occurence of this events within events_dates_per_event
"""
countdown = []
for special_date in events_dates_per_event[event]:
date_count_down = (special_date - date).days
if date_count_down >= 0:
countdown.append(date_count_down)
return min(countdown)
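# Worked example: with events_dates_per_event = compute_events_dates([2021]) the epiphanie date
# is 2021-01-06, so
#   add_countdown_in(datetime.date(2021, 1, 2), Events.EPIPHANIE, events_dates_per_event)   -> 4
#   add_countdown_ago(datetime.date(2021, 1, 10), Events.EPIPHANIE, events_dates_per_event) -> 4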
# pylint: disable=W0613
def add_feature_events_countdown(dtf, date_col, date_format):
"""
Given a dataframe dtf, a date_col and its format date_format generate two cols:
- number of days before next event occurence
- number of day since last event occurence
for each of the following events:
- epiphanie
- chandeleur
- mardi_gras
- halloween
- nouvel_an_chinois
- aid
- ramadan
"""
col_retyped = False
if not is_datetime(dtf[date_col]):
new_date_col = f'{date_col}_retyped'
dtf[new_date_col] = | pd.to_datetime(dtf[date_col], format=date_format) | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# import warnings
# warnings.filterwarnings('ignore')
# In[2]:
# import libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import sparse
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import CalibratedClassifierCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from catboost import CatBoostClassifier
import pickle
# # Amazon Employee Access Challenge
# In[3]:
train = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')
# In[4]:
train.shape
# In[5]:
test.shape
# In[6]:
y_train = train['ACTION']
# In[7]:
y_train.shape
# In[8]:
train_data = train.drop('ACTION', axis=1)
train_data.shape
# In[9]:
test_data = test.drop('id', axis=1)
test_data.shape
# ## Common Variables
# In[10]:
# define variables
random_state = 42
cv = 5
scoring = 'roc_auc'
verbose=2
# ## Common functions
# In[11]:
def save_submission(predictions, filename):
'''
Save predictions into csv file
'''
global test
submission = pd.DataFrame()
submission["Id"] = test["id"]
submission["ACTION"] = predictions
filepath = "result/sampleSubmission_"+filename
submission.to_csv(filepath, index = False)
# In[12]:
def print_graph(results, param1, param2, xlabel, ylabel, title='Plot showing the ROC_AUC score for various hyper parameter values'):
'''
Plot the graph
'''
plt.plot(results[param1],results[param2]);
plt.grid();
plt.xlabel(xlabel);
plt.ylabel(ylabel);
plt.title(title);
# In[13]:
def get_rf_params():
'''
Return dictionary of parameters for random forest
'''
params = {
'n_estimators':[10,20,50,100,200,500,700,1000],
'max_depth':[1,2,5,10,12,15,20,25],
'max_features':[1,2,3,4,5],
'min_samples_split':[2,5,7,10,20]
}
return params
# In[14]:
def get_xgb_params():
'''
Return dictionary of parameters for xgboost
'''
params = {
'n_estimators': [10,20,50,100,200,500,750,1000],
'learning_rate': uniform(0.01, 0.6),
'subsample': uniform(),
'max_depth': [3, 4, 5, 6, 7, 8, 9],
'colsample_bytree': uniform(),
'min_child_weight': [1, 2, 3, 4]
}
return params
# ### We will try the following models
#
# 1. KNN
# 2. SVM
# 3. Logistic Regression
# 4. Random Forest
# 5. Xgboost
# ## Build Models on the raw data
# ## 1.1 KNN with raw features
# In[15]:
parameters={'n_neighbors':np.arange(1,100, 5)}
clf = RandomizedSearchCV(KNeighborsClassifier(n_jobs=-1),parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[16]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_n_neighbors')
results
# In[17]:
print_graph(results, 'param_n_neighbors', 'mean_test_score', 'Hyperparameter - No. of neighbors', 'Test score')
# In[18]:
best_c=best_model.best_params_['n_neighbors']
best_c
# In[19]:
model = KNeighborsClassifier(n_neighbors=best_c,n_jobs=-1)
model.fit(train_data,y_train)
# In[20]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, "knn_raw.csv")
# 
# ## 1.2 SVM with raw feature
# In[21]:
C_val = uniform(loc=0, scale=4)
model= LinearSVC(verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
parameters={'C':C_val}
clf = RandomizedSearchCV(model,parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[22]:
best_c=best_model.best_params_['C']
best_c
# In[23]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[24]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[25]:
#https://stackoverflow.com/questions/26478000/converting-linearsvcs-decision-function-to-probabilities-scikit-learn-python
model = LinearSVC(C=best_c,verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
model = CalibratedClassifierCV(model)
model.fit(train_data,y_train)
# In[26]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'svm_raw.csv')
# 
# ## 1.3 Logistic Regression with Raw Feature
# In[27]:
C_val = uniform(loc=0, scale=4)
lr= LogisticRegression(verbose=verbose,random_state=random_state,class_weight='balanced',solver='lbfgs',max_iter=500,n_jobs=-1)
parameters={'C':C_val}
clf = RandomizedSearchCV(lr,parameters,random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[28]:
best_c=best_model.best_params_['C']
best_c
# In[29]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[30]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[31]:
model = LogisticRegression(C=best_c,verbose=verbose,n_jobs=-1,random_state=random_state,class_weight='balanced',solver='lbfgs')
model.fit(train_data,y_train)
# In[32]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'lr_raw.csv')
# 
# ## 1.4 Random Forest with Raw Feature
# In[33]:
rfc = RandomForestClassifier(random_state=random_state,class_weight='balanced',n_jobs=-1)
clf = RandomizedSearchCV(rfc,get_rf_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[34]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_rf_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[35]:
n_estimators=clf.best_params_['n_estimators']
max_features=clf.best_params_['max_features']
max_depth=clf.best_params_['max_depth']
min_samples_split=clf.best_params_['min_samples_split']
n_estimators,max_features,max_depth,min_samples_split
# In[36]:
model=RandomForestClassifier(n_estimators=n_estimators,max_depth=max_depth,max_features=max_features,
min_samples_split=min_samples_split,
random_state=random_state,class_weight='balanced',n_jobs=-1)
model.fit(train_data,y_train)
# In[37]:
features=train_data.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot('value','features',data=features);
plt.title('Feature Importance');
# ## Feature Observations:
#
# 1. MGR_ID is the most important feature, followed by RESOURCE and ROLE_DEPTNAME
# In[38]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'rf_raw.csv')
# 
# ## 1.5 Xgboost with Raw Feature
# In[39]:
xgb = XGBClassifier()
clf = RandomizedSearchCV(xgb,get_xgb_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model=clf.fit(train_data,y_train)
# In[40]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_xgb_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[41]:
colsample_bytree = clf.best_params_['colsample_bytree']
learning_rate=clf.best_params_['learning_rate']
max_depth=clf.best_params_['max_depth']
min_child_weight=clf.best_params_['min_child_weight']
n_estimators=clf.best_params_['n_estimators']
subsample=clf.best_params_['subsample']
colsample_bytree,learning_rate,max_depth,min_child_weight,n_estimators,subsample
# In[42]:
model = XGBClassifier(colsample_bytree=colsample_bytree,learning_rate=learning_rate,max_depth=max_depth,
min_child_weight=min_child_weight,n_estimators=n_estimators,subsample=subsample,n_jobs=-1)
model.fit(train_data,y_train)
# In[43]:
features=train_data.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot('value','features',data=features);
plt.title('Feature Importance');
# In[44]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'xgb_raw.csv')
# 
# 
# In[45]:
from prettytable import PrettyTable
x = PrettyTable(['Model', 'Feature', 'Private Score', 'Public Score'])
x.add_row(['KNN','Raw', 0.67224, 0.68148])
x.add_row(['SVM', 'Raw', 0.50286, 0.51390])
x.add_row(['Logistic Regression', 'Raw', 0.53857, 0.53034])
x.add_row(['Random Forest', 'Raw', 0.87269, 0.87567])
x.add_row(['Xgboost', 'Raw', 0.86988, 0.87909])
print(x)
# # Observations:
#
# 1. Xgboost performs best on the raw features
# 2. Random forest also performs well on raw features
# 3. Tree-based models perform better than linear models on raw features
# ## Build model on one hot encoded features
# ### 2.1 KNN with one hot encoded features
# In[46]:
train_ohe = sparse.load_npz('data/train_ohe.npz')
test_ohe = sparse.load_npz('data/test_ohe.npz')
train_ohe.shape, test_ohe.shape, y_train.shape
# In[47]:
parameters={'n_neighbors':np.arange(1,100, 5)}
clf = RandomizedSearchCV(KNeighborsClassifier(n_jobs=-1),parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=4)
best_model = clf.fit(train_ohe,y_train)
# In[48]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_n_neighbors')
results
# In[49]:
print_graph(results, 'param_n_neighbors', 'mean_test_score', 'Hyperparameter - No. of neighbors', 'Test score')
# In[50]:
best_c=best_model.best_params_['n_neighbors']
best_c
# In[51]:
model = KNeighborsClassifier(n_neighbors=best_c,n_jobs=-1)
model.fit(train_ohe,y_train)
# In[52]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, "knn_ohe.csv")
# 
# ## 2.2 SVM with one hot encoded features
# In[53]:
C_val = uniform(loc=0, scale=4)
model= LinearSVC(verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
parameters={'C':C_val}
clf = RandomizedSearchCV(model,parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_ohe,y_train)
# In[54]:
best_c=best_model.best_params_['C']
best_c
# In[55]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[56]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[57]:
#https://stackoverflow.com/questions/26478000/converting-linearsvcs-decision-function-to-probabilities-scikit-learn-python
model = LinearSVC(C=best_c,verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
model = CalibratedClassifierCV(model)
model.fit(train_ohe,y_train)
# In[58]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, 'svm_ohe.csv')
# 
# ## 2.3 Logistic Regression with one hot encoded features
# In[59]:
C_val = uniform(loc=0, scale=4)
lr= LogisticRegression(verbose=verbose,random_state=random_state,class_weight='balanced',solver='lbfgs',max_iter=500,n_jobs=-1)
parameters={'C':C_val}
clf = RandomizedSearchCV(lr,parameters,random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_ohe,y_train)
# In[60]:
best_c=best_model.best_params_['C']
best_c
# In[61]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[62]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[63]:
model = LogisticRegression(C=best_c,verbose=verbose,n_jobs=-1,random_state=random_state,class_weight='balanced',solver='lbfgs')
model.fit(train_ohe,y_train)
# In[64]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, 'lr_ohe.csv')
# 
# ## 2.4 Random Forest with one hot encoded features
# In[65]:
rfc = RandomForestClassifier(random_state=random_state,class_weight='balanced',n_jobs=-1)
clf = RandomizedSearchCV(rfc,get_rf_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_ohe,y_train)
# In[66]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_rf_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[67]:
n_estimators=clf.best_params_['n_estimators']
max_features=clf.best_params_['max_features']
max_depth=clf.best_params_['max_depth']
min_samples_split=clf.best_params_['min_samples_split']
n_estimators,max_features,max_depth,min_samples_split
# In[68]:
model=RandomForestClassifier(n_estimators=n_estimators,max_depth=max_depth,max_features=max_features,
min_samples_split=min_samples_split,
random_state=random_state,class_weight='balanced',n_jobs=-1)
model.fit(train_ohe,y_train)
# In[69]:
# features=train_ohe.columns
# importance=model.feature_importances_
# features=pd.DataFrame({'features':features,'value':importance})
# features=features.sort_values('value',ascending=False)
# sns.barplot('value','features',data=features);
# plt.title('Feature Importance');
# In[70]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, 'rf_ohe.csv')
# 
# ## 2.5 Xgboost with one hot encoded features
# In[71]:
xgb = XGBClassifier()
clf = RandomizedSearchCV(xgb,get_xgb_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model=clf.fit(train_ohe,y_train)
# In[72]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_xgb_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[73]:
colsample_bytree = clf.best_params_['colsample_bytree']
learning_rate=clf.best_params_['learning_rate']
max_depth=clf.best_params_['max_depth']
min_child_weight=clf.best_params_['min_child_weight']
n_estimators=clf.best_params_['n_estimators']
subsample=clf.best_params_['subsample']
colsample_bytree,learning_rate,max_depth,min_child_weight,n_estimators,subsample
# In[74]:
model = XGBClassifier(colsample_bytree=colsample_bytree,learning_rate=learning_rate,max_depth=max_depth,
min_child_weight=min_child_weight,n_estimators=n_estimators,subsample=subsample,n_jobs=-1)
model.fit(train_ohe,y_train)
# In[75]:
# features=train_ohe.columns
# importance=model.feature_importances_
# features=pd.DataFrame({'features':features,'value':importance})
# features=features.sort_values('value',ascending=False)
# sns.barplot('value','features',data=features);
# plt.title('Feature Importance');
# In[76]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, 'xgb_ohe.csv')
# 
# 
# In[77]:
from prettytable import PrettyTable
x = PrettyTable(['Model', 'Feature', 'Private Score', 'Public Score'])
x.add_row(['KNN','ohe', 0.81657, 0.81723])
x.add_row(['SVM', 'ohe', 0.87249, 0.87955])
x.add_row(['Logistic Regression', 'ohe', 0.87436, 0.88167])
x.add_row(['Random Forest', 'ohe', 0.84541, 0.84997])
x.add_row(['Xgboost', 'ohe', 0.84717, 0.85102])
print(x)
# # Observations:
#
# 1. One-hot encoded features perform better than the other encoding techniques
# 2. Linear models (Logistic Regression and SVM) perform better in higher dimensions
# # 3 Build model on frequency encoded features
# ## 3.1 KNN with frequency encoding
# In[78]:
train_df_fc = pd.read_csv('data/train_df_fc.csv')
test_df_fc = pd.read_csv('data/test_df_fc.csv')
# In[79]:
train_df_fc.shape, test_df_fc.shape, y_train.shape
# In[80]:
parameters={'n_neighbors':np.arange(1,100, 5)}
clf = RandomizedSearchCV(KNeighborsClassifier(n_jobs=-1),parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_fc,y_train)
# In[81]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_n_neighbors')
results
# In[82]:
print_graph(results, 'param_n_neighbors', 'mean_test_score', 'Hyperparameter - No. of neighbors', 'Test score')
# In[83]:
best_c=best_model.best_params_['n_neighbors']
best_c
# In[84]:
model = KNeighborsClassifier(n_neighbors=best_c,n_jobs=-1)
model.fit(train_df_fc,y_train)
# In[85]:
predictions = model.predict_proba(test_df_fc)[:,1]
save_submission(predictions, "knn_fc.csv")
# 
# ## 3.2 SVM with frequency encoding
# In[86]:
C_val = uniform(loc=0, scale=4)
model= LinearSVC(verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
parameters={'C':C_val}
clf = RandomizedSearchCV(model,parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_fc,y_train)
# In[87]:
best_c=best_model.best_params_['C']
best_c
# In[88]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[89]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[90]:
#https://stackoverflow.com/questions/26478000/converting-linearsvcs-decision-function-to-probabilities-scikit-learn-python
model = LinearSVC(C=best_c,verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
model = CalibratedClassifierCV(model)
model.fit(train_df_fc,y_train)
# In[91]:
predictions = model.predict_proba(test_df_fc)[:,1]
save_submission(predictions, 'svm_fc.csv')
# 
# ## 3.3 Logistic Regression with frequency encoding
# In[92]:
C_val = uniform(loc=0, scale=4)
lr= LogisticRegression(verbose=verbose,random_state=random_state,class_weight='balanced',solver='lbfgs',max_iter=500,n_jobs=-1)
parameters={'C':C_val}
clf = RandomizedSearchCV(lr,parameters,random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_fc,y_train)
# In[93]:
best_c=best_model.best_params_['C']
best_c
# In[94]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[95]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[96]:
model = LogisticRegression(C=best_c,verbose=verbose,n_jobs=-1,random_state=random_state,class_weight='balanced',solver='lbfgs')
model.fit(train_df_fc,y_train)
# In[97]:
predictions = model.predict_proba(test_df_fc)[:,1]
save_submission(predictions, 'lr_fc.csv')
# 
# ## 3.4 Random Forest with frequency encoding
# In[98]:
rfc = RandomForestClassifier(random_state=random_state,class_weight='balanced',n_jobs=-1)
clf = RandomizedSearchCV(rfc,get_rf_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_fc,y_train)
# In[99]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_rf_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[100]:
n_estimators=clf.best_params_['n_estimators']
max_features=clf.best_params_['max_features']
max_depth=clf.best_params_['max_depth']
min_samples_split=clf.best_params_['min_samples_split']
n_estimators,max_features,max_depth,min_samples_split
# In[101]:
model=RandomForestClassifier(n_estimators=n_estimators,max_depth=max_depth,max_features=max_features,
min_samples_split=min_samples_split,
random_state=random_state,class_weight='balanced',n_jobs=-1)
model.fit(train_df_fc,y_train)
# In[103]:
features=train_df_fc.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
# -*- coding: utf-8 -*-
'''prepare data for random forest classifier'''
import logging
import os
import datetime as dt
import pandas as pd
#from sklearn.preprocessing import OneHotEncoder
logger = logging.getLogger('main')
def load_data(fileDir='general'):
# get today and create path for saving investment list
dirName = os.path.join('data', fileDir)
today = dt.datetime.now()
todayStr = today.strftime('%Y_%m_%d')
filename = f'{todayStr}.csv'
filepath = os.path.join(dirName, filename)
# load raw data
dataRaw = pd.read_csv(filepath, low_memory=False)
length = len(dataRaw)/1000
logger.info(f'Loaded Data: {length:.1f}k credits')
return dataRaw
##############################################################################
def clean_data(dataRaw, mode='train'):
    if mode not in ('train', 'apply'):
        raise ValueError(f"mode must be 'train' or 'apply', got '{mode}'")
if mode == 'train':
# create label
if True:
            # anything up to 180 days late (or never late) counts as non-default
            non_default_categories = ['1-7', '8-15', '16-30', '31-60', '61-90',
                                      '91-120', '121-150', '151-180']
            dataRaw.loc[dataRaw.WorseLateCategory.isnull(), 'Defaulted'] = 0
            dataRaw.loc[dataRaw.WorseLateCategory.isin(non_default_categories), 'Defaulted'] = 0
            dataRaw.loc[dataRaw.WorseLateCategory == '180+', 'Defaulted'] = 1
#dataRaw.loc[dataRaw.Status == 'Repaid', 'Defaulted'] = 0
else:
dataRaw.loc[dataRaw.Status == 'Late', 'Defaulted'] = 1
dataRaw.loc[dataRaw.Status == 'Current', 'Defaulted'] = 0
dataRaw.loc[dataRaw.Status == 'Repaid', 'Defaulted'] = 0
# define features
columnsFeatures = ['BiddingStartedOn', # will be removed
'Age', # continuous
'Amount', # continuous
'AmountOfPreviousLoansBeforeLoan', # continuous
#'ApplicationSignedHour', # 0-23
#'ApplicationSignedWeekday', # 1-7
'AppliedAmount', # continuous
'BidsApi', # continuous
'BidsManual', # continuous
'BidsPortfolioManager', # continuous
'Country', #
'Education', #
#'EmploymentDurationCurrentEmployer', #
'ExistingLiabilities', # continuous
'Gender', # 0, 1, 2
#'HomeOwnershipType', #
'IncomeTotal', # continuous
'Interest', # continuous
#'LanguageCode', # 1:26
'LiabilitiesTotal', # continuous
'LoanDuration', # continuous
'MonthlyPayment', # continuous
#'MonthlyPaymentDay',
'NewCreditCustomer', # True False
'NoOfPreviousLoansBeforeLoan', # continuous
'PreviousRepaymentsBeforeLoan', # continuous
'ProbabilityOfDefault', # continuous
'Rating',
'VerificationType'
]
if mode == 'train':
columnsFeatures.append('Defaulted')
# extract features from raw
dataPresorted = dataRaw[columnsFeatures].copy()
# get row count before cleaning
presortedLen = len(dataPresorted.index)
# format to datetime and sort
dataPresorted.loc[:, 'BiddingStartedOn'] = pd.to_datetime(dataPresorted.BiddingStartedOn).copy()
dataPresorted = dataPresorted.sort_values(['BiddingStartedOn'], ascending=True)
if mode == 'train':
# remove data prior three months from today
today = dt.datetime.now()
dataSorted = dataPresorted.loc[dataPresorted['BiddingStartedOn'] <= (today - dt.timedelta(days=90)),:].copy()
sortedLen = len(dataSorted.index)
removed = (presortedLen - sortedLen) / presortedLen *100
logger.info(f'{removed:.2f} % time filtered')
else:
dataSorted = dataPresorted
sortedLen = len(dataSorted.index)
removed = 0
# additional features
dataSorted.loc[:, 'AppliedRatio'] = dataSorted.Amount.div(dataSorted.AppliedAmount)
dataSorted.loc[:, 'IncomeCreditRatio'] = dataSorted.MonthlyPayment.div(dataSorted.IncomeTotal)
dataSorted.loc[dataSorted.IncomeCreditRatio > 2, 'IncomeCreditRatio'] = 2
# replace NaN element with standard values
values = {'AmountOfPreviousLoansBeforeLoan': 0,
'Education': 0,
'Gender': 2,
'HomeOwnershipType': 10,
'EmploymentDurationCurrentEmployer': 'Other',
'MonthlyPayment': 0,
'NoOfPreviousLoansBeforeLoan': 0,
'PreviousRepaymentsBeforeLoan': 0,
'VerificationType': 0
}
dataClean = dataSorted.fillna(values)
# remove remaining nan
dataClean = dataClean.dropna(axis=0)
cleanLen = len(dataClean.index)
removed = (presortedLen - cleanLen) / presortedLen *100 - removed
if mode =='train':
logger.info(f'{removed:.2f} % NaN filtered')
elif mode == 'apply' and removed > 0:
logger.warning(f'Some credits were removed due to NaN values')
# one hot encoding
# define possible categories
hours = list(range(0,24))
weekday = list(range(1,8))
country = ['EE', 'ES', 'FI', 'SK']
education = list(range(-1,6))
employment = ['MoreThan5Years', 'UpTo3Years',
'UpTo5Years', 'UpTo1Year',
'UpTo2Years', 'UpTo4Years',
'TrialPeriod', 'Retiree', 'Other']
gender = list(range(0,3))
home = list(range(0,11))
language = list(range(1,27))
new = [False, True]
rating = ['AA', 'A', 'B', 'C', 'D', 'E', 'F', 'HR']
veri = list(range(0,5))
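    # The 30 dummy rows built below enumerate every possible category defined above,
    # so that pd.get_dummies() further down always emits the same set of one-hot
    # columns regardless of which categories happen to appear in the loaded data.
    # Age is set to the sentinel -99 so the synthetic rows can presumably be
    # identified and dropped again after encoding.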
dummy = dataClean.head(30).copy().reset_index(drop=True)
dummy['Age'] = -99
for ii in range(0,30):
dummy.loc[ii, 'VerificationType'] = veri[min(ii,len(veri)-1)]
dummy.loc[ii, 'Gender'] = gender[min(ii,len(gender)-1)]
#dummy.loc[ii, 'ApplicationSignedHour'] = hours[min(ii,len(hours)-1)]
#dummy.loc[ii, 'ApplicationSignedWeekday'] = weekday[min(ii,len(weekday)-1)]
dummy.loc[ii, 'Country'] = country[min(ii,len(country)-1)]
dummy.loc[ii, 'Education'] = education[min(ii,len(education)-1)]
#dummy.loc[ii, 'EmploymentDurationCurrentEmployer'] = employment[min(ii,len(employment)-1)]
#dummy.loc[ii, 'HomeOwnershipType'] = home[min(ii,len(home)-1)]
#dummy.loc[ii, 'LanguageCode'] = language[min(ii,len(language)-1)]
dummy.loc[ii, 'NewCreditCustomer'] = new[min(ii,len(new)-1)]
dummy.loc[ii, 'Rating'] = rating[min(ii,len(rating)-1)]
dataClean = dataClean.append(dummy, sort=False)
# change specific columns numeric to string to trigger dummy creation
dataClean['VerificationType'] = dataClean.VerificationType.astype(int).astype(str)
dataClean['Gender'] = dataClean.Gender.astype(int).astype(str)
#dataClean['ApplicationSignedHour'] = dataClean.ApplicationSignedHour.astype(int).astype(str)
#dataClean['ApplicationSignedWeekday'] = dataClean.ApplicationSignedWeekday.astype(int).astype(str)
dataClean['Education'] = dataClean.Education.astype(int).astype(str)
#dataClean['HomeOwnershipType'] = dataClean.HomeOwnershipType.astype(int).astype(str)
#dataClean['LanguageCode'] = dataClean.LanguageCode.astype(int).astype(str)
    dataClean = pd.get_dummies(dataClean)
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import os.path
import pkg_resources
import tempfile
import unittest
import numpy as np
import pandas as pd
from qiime2.metadata import (Metadata, CategoricalMetadataColumn,
NumericMetadataColumn, MetadataFileError)
def get_data_path(filename):
return pkg_resources.resource_filename('qiime2.metadata.tests',
'data/%s' % filename)
# NOTE: many of the test files in the `data` directory intentionally have
# leading/trailing whitespace characters on some lines, as well as mixed usage
# of spaces, tabs, carriage returns, and newlines. When editing these files,
# please make sure your code editor doesn't strip these leading/trailing
# whitespace characters (e.g. Atom does this by default), nor automatically
# modify the files in some other way such as converting Windows-style CRLF
# line terminators to Unix-style newlines.
#
# When committing changes to the files, carefully review the diff to make sure
# unintended changes weren't introduced.
class TestLoadErrors(unittest.TestCase):
def test_path_does_not_exist(self):
with self.assertRaisesRegex(MetadataFileError,
"Metadata file path doesn't exist"):
Metadata.load(
'/qiime2/unit/tests/hopefully/this/path/does/not/exist')
def test_path_is_directory(self):
fp = get_data_path('valid')
with self.assertRaisesRegex(MetadataFileError,
"path points to something other than a "
"file"):
Metadata.load(fp)
def test_non_utf_8_file(self):
fp = get_data_path('invalid/non-utf-8.tsv')
with self.assertRaisesRegex(MetadataFileError,
'encoded as UTF-8 or ASCII'):
Metadata.load(fp)
def test_utf_16_le_file(self):
fp = get_data_path('invalid/simple-utf-16le.txt')
with self.assertRaisesRegex(MetadataFileError,
'UTF-16 Unicode'):
Metadata.load(fp)
def test_utf_16_be_file(self):
fp = get_data_path('invalid/simple-utf-16be.txt')
with self.assertRaisesRegex(MetadataFileError,
'UTF-16 Unicode'):
Metadata.load(fp)
def test_empty_file(self):
fp = get_data_path('invalid/empty-file')
with self.assertRaisesRegex(MetadataFileError,
'locate header.*file may be empty'):
Metadata.load(fp)
def test_comments_and_empty_rows_only(self):
fp = get_data_path('invalid/comments-and-empty-rows-only.tsv')
with self.assertRaisesRegex(MetadataFileError,
'locate header.*only of comments or empty '
'rows'):
Metadata.load(fp)
def test_header_only(self):
fp = get_data_path('invalid/header-only.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_header_only_with_comments_and_empty_rows(self):
fp = get_data_path(
'invalid/header-only-with-comments-and-empty-rows.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_qiime1_empty_mapping_file(self):
fp = get_data_path('invalid/qiime1-empty.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_invalid_header(self):
fp = get_data_path('invalid/invalid-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'unrecognized ID column name.*'
'invalid_id_header'):
Metadata.load(fp)
def test_empty_id(self):
fp = get_data_path('invalid/empty-id.tsv')
with self.assertRaisesRegex(MetadataFileError, 'empty metadata ID'):
Metadata.load(fp)
def test_whitespace_only_id(self):
fp = get_data_path('invalid/whitespace-only-id.tsv')
with self.assertRaisesRegex(MetadataFileError, 'empty metadata ID'):
Metadata.load(fp)
def test_empty_column_name(self):
fp = get_data_path('invalid/empty-column-name.tsv')
with self.assertRaisesRegex(MetadataFileError,
'column without a name'):
Metadata.load(fp)
def test_whitespace_only_column_name(self):
fp = get_data_path('invalid/whitespace-only-column-name.tsv')
with self.assertRaisesRegex(MetadataFileError,
'column without a name'):
Metadata.load(fp)
def test_duplicate_ids(self):
fp = get_data_path('invalid/duplicate-ids.tsv')
with self.assertRaisesRegex(MetadataFileError,
'IDs must be unique.*id1'):
Metadata.load(fp)
def test_duplicate_ids_with_whitespace(self):
fp = get_data_path('invalid/duplicate-ids-with-whitespace.tsv')
with self.assertRaisesRegex(MetadataFileError,
'IDs must be unique.*id1'):
Metadata.load(fp)
def test_duplicate_column_names(self):
fp = get_data_path('invalid/duplicate-column-names.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Column names must be unique.*col1'):
Metadata.load(fp)
def test_duplicate_column_names_with_whitespace(self):
fp = get_data_path(
'invalid/duplicate-column-names-with-whitespace.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Column names must be unique.*col1'):
Metadata.load(fp)
def test_id_conflicts_with_id_header(self):
fp = get_data_path('invalid/id-conflicts-with-id-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
"ID 'id' conflicts.*ID column header"):
Metadata.load(fp)
def test_column_name_conflicts_with_id_header(self):
fp = get_data_path(
'invalid/column-name-conflicts-with-id-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column name 'featureid' conflicts.*ID "
"column header"):
Metadata.load(fp)
def test_column_types_unrecognized_column_name(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
'not_a_column.*column_types.*not a column '
'in the metadata file'):
Metadata.load(fp, column_types={'not_a_column': 'numeric'})
def test_column_types_unrecognized_column_type(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
'col2.*column_types.*unrecognized column '
'type.*CATEGORICAL'):
Metadata.load(fp, column_types={'col1': 'numeric',
'col2': 'CATEGORICAL'})
def test_column_types_not_convertible_to_numeric(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column 'col3' to numeric.*could not be "
"interpreted as numeric: 'bar', 'foo'"):
Metadata.load(fp, column_types={'col1': 'numeric',
'col2': 'categorical',
'col3': 'numeric'})
def test_column_types_override_directive_not_convertible_to_numeric(self):
fp = get_data_path('valid/simple-with-directive.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column 'col3' to numeric.*could not be "
"interpreted as numeric: 'bar', 'foo'"):
Metadata.load(fp, column_types={'col3': 'numeric'})
def test_directive_before_header(self):
fp = get_data_path('invalid/directive-before-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'directive.*#q2:types.*searching for '
'header'):
Metadata.load(fp)
def test_unrecognized_directive(self):
fp = get_data_path('invalid/unrecognized-directive.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Unrecognized directive.*#q2:foo.*'
'#q2:types directive is supported'):
Metadata.load(fp)
def test_duplicate_directives(self):
fp = get_data_path('invalid/duplicate-directives.tsv')
with self.assertRaisesRegex(MetadataFileError,
'duplicate directive.*#q2:types'):
Metadata.load(fp)
def test_unrecognized_column_type_in_directive(self):
fp = get_data_path('invalid/unrecognized-column-type.tsv')
with self.assertRaisesRegex(MetadataFileError,
'col2.*unrecognized column type.*foo.*'
'#q2:types directive'):
Metadata.load(fp)
def test_column_types_directive_not_convertible_to_numeric(self):
fp = get_data_path('invalid/types-directive-non-numeric.tsv')
# This error message regex is intentionally verbose because we want to
# assert that many different types of non-numeric strings aren't
# interpreted as numbers. The error message displays a sorted list of
# all values that couldn't be converted to numbers, making it possible
# to test a variety of non-numeric strings in a single test case.
msg = (r"column 'col2' to numeric.*could not be interpreted as "
r"numeric: '\$42', '\+inf', '-inf', '0xAF', '1,000', "
r"'1\.000\.0', '1_000_000', '1e3e4', 'Infinity', 'NA', 'NaN', "
"'a', 'e3', 'foo', 'inf', 'nan', 'sample-1'")
with self.assertRaisesRegex(MetadataFileError, msg):
Metadata.load(fp)
def test_directive_after_directives_section(self):
fp = get_data_path(
'invalid/directive-after-directives-section.tsv')
with self.assertRaisesRegex(MetadataFileError,
'#q2:types.*outside of the directives '
'section'):
Metadata.load(fp)
def test_directive_longer_than_header(self):
fp = get_data_path('invalid/directive-longer-than-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'row has 5 cells.*header declares 4 '
'cells'):
Metadata.load(fp)
def test_data_longer_than_header(self):
fp = get_data_path('invalid/data-longer-than-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'row has 5 cells.*header declares 4 '
'cells'):
Metadata.load(fp)
class TestLoadSuccess(unittest.TestCase):
def setUp(self):
self.temp_dir_obj = tempfile.TemporaryDirectory(
prefix='qiime2-metadata-tests-temp-')
self.temp_dir = self.temp_dir_obj.name
# This Metadata object is compared against observed Metadata objects in
# many of the tests, so just define it once here.
self.simple_md = Metadata(
pd.DataFrame({'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
                         index=pd.Index(['id1', 'id2', 'id3'], name='id')))
# to do:
# - calculate train score
# - learning curve plot (vary training examples used and examine the effect on train and validation set scores)
# - https://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html
# - add sampling in the model code
# - finish creating lists of hyperparameter values
# - https://neptune.ai/blog/lightgbm-parameters-guide
# - make sure log is not copied to S3 if the program crashes
# - check if LightGBM and Dask logging needs to be disabled - LightGBM probably sends all output to stdout
# - plot learning curves? https://stackoverflow.com/questions/60132246/how-to-plot-the-learning-curves-in-lightgbm-and-python
# - write docstrings and header
# - best_iteration - needed? (can be used while saving model)
# "s3://sales-demand-data/parquet_dataset/"
# save_model(filename, num_iteration=None, start_iteration=0, importance_type='split')[source]
# Save Booster to file.
#
# Parameters
# filename (string or pathlib.Path) – Filename to save Booster.
# num_iteration (int or None, optional (default=None)) – Index of the iteration that should be saved. If None, if the best iteration exists, it is saved; otherwise, all iterations are saved. If <= 0, all iterations are saved.
# start_iteration (int, optional (default=0)) – Start index of the iteration that should be saved.
# importance_type (string, optional (default="split")) – What type of feature importance should be saved. If “split”, result contains numbers of times the feature is used in a model. If “gain”, result contains total gains of splits which use the feature.
#
# Returns
# self – Returns self.
#
# Return type
# Booster
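# Hedged usage sketch (not part of the quoted docs above): after fitting, the
# underlying Booster can be saved with something like
#   model.booster_.save_model("lgb_model.txt", num_iteration=model.best_iteration_)
# where `model` is a fitted (Dask)LGBMRegressor; the attribute names assume the
# scikit-learn style LightGBM API.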
import argparse
from datetime import datetime, timedelta
from itertools import product
import logging
import os
from pathlib import Path
import platform
from statistics import mean
import sys
import time
import boto3
from botocore.exceptions import ClientError
import dask as dsk
from dask import array as da, dataframe as dd
from dask.distributed import Client, LocalCluster, performance_report, wait
from dask_ml.metrics.regression import mean_squared_error
from dateutil.relativedelta import relativedelta
from ec2_metadata import ec2_metadata
import lightgbm as lgb
import numpy as np
import pandas as pd
def month_counter(fm, LAST_DAY_OF_TRAIN_PRD=(2015, 10, 31)):
"""Calculate number of months (i.e. month boundaries) between the first
month of train period and the end month of validation period.
Parameters:
-----------
fm : datetime
First day of first month of train period
Returns:
--------
Number of months between first month of train period and end month of validation period
"""
return (
(datetime(*LAST_DAY_OF_TRAIN_PRD).year - fm.year) * 12
+ datetime(*LAST_DAY_OF_TRAIN_PRD).month
- fm.month
)
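# Illustrative check (assuming the default LAST_DAY_OF_TRAIN_PRD of 2015-10-31):
# month_counter(datetime(2015, 1, 1)) -> (2015 - 2015) * 12 + 10 - 1 = 9,
# i.e. nine month boundaries between January 2015 and the end of October 2015.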
def calc_rmse(y_true, y_pred, get_stats):
if get_stats:
pred_stats_to_csv(y_true, y_pred)
return mean_squared_error(y_true, y_pred, squared=False, compute=True)
def pred_stats_to_csv(y_true, y_pred, output_csv="pred_value_stats.csv"):
y_true_df = pd.DataFrame(y_true.compute(), columns=["y_true"])
y_pred_df = pd.DataFrame(
y_pred.compute(), columns=["y_pred"], index=y_true_df.index
) # convert Dask array to Pandas DF
full_df = pd.concat(
[y_true_df, y_pred_df], axis=1
) # join actual and predicted values
del y_true_df
del y_pred_df
stats_df = (
full_df.groupby("y_true")
.describe(percentiles=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99])
.droplevel(level=0, axis=1)
.reset_index()
)
stats_df.to_csv(output_csv, index=False)
s3_client = boto3.client("s3")
try:
s3_client.upload_file(output_csv, "sales-demand-data", output_csv)
logging.info(
"CSV file with descriptive stats of predicted values "
"successfully copied to S3."
)
except ClientError as e:
logging.exception(
"CSV file with descriptive stats of predicted values "
"was not copied to S3."
)
def calc_monthly_rmse(y_true_w_id_cols, y_pred):
y_true_df = y_true_w_id_cols.compute() # convert Dask dataframe to Pandas DF
y_pred_df = pd.DataFrame(
y_pred.compute(), columns=["y_pred"], index=y_true_df.index
) # convert Dask array to Pandas DF
full_df = pd.concat(
[y_true_df, y_pred_df], axis=1
) # join actual and predicted values
del y_true_df
del y_pred_df
# calculate sums of actual and predicted values by shop-item-month
# the code below assumes that same calendar month does not appear across multiple years in validation set
shop_item_month_df = (
full_df.groupby([full_df.index.month, "shop_id", "item_id"])
.agg("sum")
.reset_index()
)
# calculate RMSE for each month and then take the average of monthly values
return (
shop_item_month_df.groupby("sale_date")
.apply(
lambda x: np.sqrt(
np.average((x["sid_shop_item_qty_sold_day"] - x["y_pred"]) ** 2)
)
)
.mean()
)
# calculate monthly rmse
# return np.sqrt(np.average((shop_item_df['sid_shop_item_qty_sold_day'] - shop_item_df['y_pred'])**2))
def valid_frac(s):
"""Convert command-line fraction argument to float value.
Parameters:
-----------
s : str
Command-line argument for fraction of rows to sample
Returns:
--------
float
Raises:
-------
ArgumentTypeError
if input string cannot be converted to float or if the resulting float
is a negative value
"""
try:
f = float(s)
except ValueError:
msg = f"Not a valid fraction value: {s}. Enter a value between 0.0 and 1.0."
raise argparse.ArgumentTypeError(msg)
else:
if f < 0:
msg = f"{f} is an invalid positive float value. Enter a value between 0.0 and 1.0."
raise argparse.ArgumentTypeError(msg)
return f
def valid_date(s):
"""Convert command-line date argument to YY-MM datetime value.
Parameters:
-----------
s : str
Command-line argument for first month of data to be used
Returns:
--------
datetime object (format: %y-%m)
Raises:
-------
ArgumentTypeError
if input string cannot be parsed according to %y-%m strptime format
"""
try:
return datetime.strptime(s, "%y-%m")
except ValueError:
msg = f"Not a valid date: {s}."
raise argparse.ArgumentTypeError(msg)
# https://lightgbm.readthedocs.io/en/latest/Parameters-Tuning.html
# Deal with Over-fitting
# Use small max_bin
# Use small num_leaves
# Use min_data_in_leaf and min_sum_hessian_in_leaf
# Use bagging by set bagging_fraction and bagging_freq
# Use feature sub-sampling by set feature_fraction
# Use bigger training data
# Try lambda_l1, lambda_l2 and min_gain_to_split for regularization
# Try max_depth to avoid growing deep tree
# Try extra_trees
# Try increasing path_smooth
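# A hedged illustration (not used by the code below) of how the advice above maps
# onto concrete LightGBM parameters; the values are assumptions, not tuned
# recommendations:
#   anti_overfit_params = {
#       "max_bin": 128,                      # smaller max_bin
#       "num_leaves": 15,                    # smaller num_leaves
#       "min_data_in_leaf": 100,             # alias of min_child_samples
#       "min_sum_hessian_in_leaf": 1e-2,
#       "bagging_fraction": 0.8, "bagging_freq": 1,   # enable bagging
#       "feature_fraction": 0.8,             # feature sub-sampling
#       "lambda_l1": 0.1, "lambda_l2": 0.1, "min_gain_to_split": 0.0,
#       "max_depth": 6,                      # avoid growing deep trees
#       "extra_trees": True,
#       "path_smooth": 1.0,
#   }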
# boosting_type = 'gbdt', num_leaves=31, max_depth=- 1, learning_rate=0.1, n_estimators=100,
# subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0,
# min_child_weight=0.001, min_child_samples=20, subsample=1.0, subsample_freq=0,
# colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0, random_state=None, n_jobs=- 1,
# silent=True, importance_type='split', client=None, **kwargs
# param_names = ('num','let')
# for params_dict in (dict(zip(param_names,v)) for v in product([1,2,3],('a','b'))):
# print(params_dict)
# convert the generator expression into a list that's saved to instance variable,
# make the loop numbered (with enumerate),
# update the instance variable (one dictionary at a time) while looping over sets of hyperparameters,
# at the end, convert the full list of dictionaries into a table that can be exported to CSV
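# Minimal sketch of that plan (illustrative names only; kept as comments so module
# behaviour is unchanged):
#   param_grid = {"num_leaves": [31, 62], "learning_rate": [0.1, 0.05]}
#   all_combs = [dict(zip(param_grid, v)) for v in product(*param_grid.values())]
#   for comb in all_combs:
#       comb["avg_rmse_"] = None  # filled in after walk-forward validation
#   pd.DataFrame(all_combs).to_csv("hyperparameter_results.csv", index=False)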
# params = {
# 'objective' : ['tweedie', 'regression', 'regression_l1', 'poisson'],
# 'metric' : ['rmse'], # tweedie, poisson, rmse, l1, l2
# 'boosting_type' : ['gdbt', 'dart', 'rf'],
# # num_leaves - sets the maximum number of nodes per tree. Decrease num_leaves to reduce training time.
# 'num_leaves' : [31, 62, 124], # max number of leaves in one tree, 31 is default
# # max_depth - this parameter is an integer that controls the maximum distance between the root node of each tree and a leaf node. Decrease max_depth to reduce training time. -1 is default (no limit)
# 'max_depth' : [5, 10],
# # num_iterations - number of boosting iterations, default is 100 (alias: n_estimators)
# 'num_iterations' : [50, 75, 100],
# # min_child_samples - minimal number of data in one leaf. Can be used to deal with over-fitting, 20 is default, aka min_data_in_leaf
# 'min_child_samples' : [2, 100, 1000],
# # learning_rate: default is 0.1
# 'learning_rate' : [0.1, 0.05, 0.01],
# # max_bin - max number of bins that feature values will be bucketed in, use larger value for better accuracy (may be slower), smaller value helps deal with over-fitting, default is 255
# 'max_bin' : [128, 255],
# # subsample_for_bin - number of data that sampled to construct feature discrete bins, default: 200000
# 'subsample_for_bin' : [200000],
# # bagging_fraction - for random selection of part of the data, without resampling, default: 1.0, constraints: 0.0 < bagging_fraction <= 1.0
# 'bagging_fraction' : [1.0],
# # bagging_freq - frequency for bagging, 0 means disable bagging; k means perform bagging at every k iteration. default: 0
# 'bagging_freq' : [0],
# # feature_fraction - LightGBM will randomly select a subset of features on each iteration (tree) if feature_fraction is smaller than 1.0, default: 1.0, constraints: 0.0 < feature_fraction <= 1.0
# # colsample_bytree (float, optional (default=1.)) – Subsample ratio of columns when constructing each tree.
# 'colsample_bytree' : [1.0]
# }
params = {
"objective": ["tweedie"],
"metric": ["rmse"], # tweedie, poisson, rmse, l1, l2
"boosting_type": ["gbdt"],
# num_leaves - sets the maximum number of nodes per tree. Decrease num_leaves to reduce training time.
"num_leaves": [28], # max number of leaves in one tree, 31 is default
# max_depth - this parameter is an integer that controls the maximum distance between the root node of each tree and a leaf node. Decrease max_depth to reduce training time. -1 is default (no limit)
# To keep in mind: "Accuracy may be bad since you didn't explicitly set num_leaves OR 2^max_depth > num_leaves. (num_leaves=31)."
"max_depth": [5],
# num_iterations - number of boosting iterations, default is 100 (alias: n_estimators)
"num_iterations": [1000],
# min_child_samples - minimal number of data in one leaf. Can be used to deal with over-fitting, 20 is default, aka min_data_in_leaf
"min_child_samples": [200],
# learning_rate: default is 0.1
"learning_rate": [0.01],
# max_bin - max number of bins that feature values will be bucketed in, use larger value for better accuracy (may be slower), smaller value helps deal with over-fitting, default is 255
"max_bin": [255],
# subsample_for_bin - number of data that sampled to construct feature discrete bins, default: 200000
"subsample_for_bin": [200000],
# bagging_fraction - for random selection of part of the data, without resampling, default: 1.0, constraints: 0.0 < bagging_fraction <= 1.0
"bagging_fraction": [0.6],
# bagging_freq - frequency for bagging, 0 means disable bagging; k means perform bagging at every k iteration. default: 0
"bagging_freq": [0],
# feature_fraction - LightGBM will randomly select a subset of features on each iteration (tree) if feature_fraction is smaller than 1.0, default: 1.0, constraints: 0.0 < feature_fraction <= 1.0
# colsample_bytree (float, optional (default=1.)) – Subsample ratio of columns when constructing each tree.
"colsample_bytree": [1.],
"tweedie_variance_power": [1.4],
"weight_for_zeros": [1.],
}
# additional parameters
# pre_partition: https://lightgbm.readthedocs.io/en/latest/Parameters.html#pre_partition
# default: false
# used for distributed learning (excluding the feature_parallel mode)
# true if training data are pre-partitioned, and different machines use different partitions
# tweedie_variance_power: https://lightgbm.readthedocs.io/en/latest/Parameters.html#tweedie_variance_power
# default: 1.5, constraints: 1.0 <= tweedie_variance_power < 2.0
# used only in tweedie regression application
# used to control the variance of the tweedie distribution
# set this closer to 2 to shift towards a Gamma distribution
# set this closer to 1 to shift towards a Poisson distribution
# poisson_max_delta_step: https://lightgbm.readthedocs.io/en/latest/Parameters.html#poisson_max_delta_step
# default: 0.7, constraints: poisson_max_delta_step > 0.0
# used only in poisson regression application
# parameter for Poisson regression to safeguard optimization
# distributed learning
# num_threads: https://lightgbm.readthedocs.io/en/latest/Parameters.html#num_threads
# number of threads for LightGBM, default: 0
# for the best speed, set this to the number of real CPU cores, not the number of threads (most CPUs use hyper-threading to generate 2 threads per CPU core)
# for distributed learning, do not use all CPU cores because this will cause poor performance for the network communication
# n_jobs (int, optional (default=-1)) – Number of parallel threads.
# https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.DaskLGBMRegressor.html#lightgbm.DaskLGBMRegressor
# parameters specific to objective
# lambda_l1 - L1 regularization, default: 0.0, constraints: lambda_l1 >= 0.0
# lambda_l2 - L2 regularization, default: 0.0, constraints: lambda_l2 >= 0.0
# for DaskLGBMRegressor: reg_alpha – L1 regularization term on weights, reg_lambda – L2 regularization term on weights.
# b = {'objective' : ['tweedie', 'regression_l1', 'poisson'], 'boosting_type' : ['gdbt', 'dart', 'rf']}
# # >>> list(product(*list(b.values())))
# [('tweedie', 'gdbt'), ('tweedie', 'dart'), ('tweedie', 'rf'), ('regression_l1', 'gdbt'), ('regression_l1', 'dart'), ('regression_l1', 'rf'), ('poisson', 'gdbt'), ('poisson', 'dart'), ('poisson', 'rf')]
#
# [dict(zip(b.keys(), v)) for v in list(product(*list(b.values())))]
class LightGBMDaskLocal:
# https://github.com/Nixtla/mlforecast/blob/main/nbs/distributed.forecast.ipynb
"""
persist call: data = self.client.persist(data)
(assignment replaces old lazy array, as persist does not change the
input in-place)
To reduce the risk of hitting memory limits,
consider restarting each worker process before running any data loading or training code.
self.client.restart()
- This function will restart each of the worker processes, clearing out anything
they’re holding in memory. This function does NOT restart the actual machines of
your cluster, so it runs very quickly.
- should the workers just be killed regardless of whether the whole process
was successful or unsuccessful (sort of a clean up action)? can restarting
be that cleanup action?
loop over hyperparameter values (method that accepts hyperparameters as a dictionary -
initializes self.model = DaskLGBMRegressor() with each set of parameters and
calls the method that loops over )
loop over train-valdation sets
run model's fit method and compute predicted values and RMSE
"""
def __init__(
self,
curr_dt_time,
n_workers,
s3_path,
startmonth,
n_months_in_first_train_set,
n_months_in_val_set,
frac=None,
):
self.curr_dt_time = curr_dt_time
self.startmonth = startmonth
self.n_months_in_first_train_set = n_months_in_first_train_set
self.n_months_in_val_set = n_months_in_val_set
self.frac = frac if frac is not None else 1.0
cluster = LocalCluster(n_workers=n_workers)
self.client = Client(cluster)
self.client.wait_for_workers(n_workers)
print(f"***VIEW THE DASHBOARD HERE***: {cluster.dashboard_link}")
# self.pca_transformed = ___ # call PCA code that returns numpy array here
# (rename self.pca_transformed to self.full_dataset)
# numpy array can also be created from the saved (pickle) file
# for data:
# instead of first looping over hyperparameter values and then over different
# train-validation sets, is it better to do it in the opposite order
# to allow for one set of train-validation data to be created only once?
try:
# this commented out code did not work without the meta= argument,
# meta= was not tried as it needs all other columns listed, in
# addition to the ones being recast
# self.full_dataset = self.client.persist(
# dd.read_parquet(
# s3_path, index=False, engine="pyarrow"
# )
# .sample(frac=self.frac, random_state=42)
# .map_partitions(
# self.cast_types,
# meta={
# 'sid_shop_item_qty_sold_day': 'i2',
# **{f'cat{n}': 'i2' for n in range(1,23)}
# }
# )
# .map_partitions(self.drop_neg_qty_sold)
# .set_index(
# "sale_date", sorted=False, npartitions="auto"
# )
# .repartition(partition_size="100MB")
# )
# create Dask dataframe from partitioned Parquet dataset on S3 and persist it to cluster
self.full_dataset = dd.read_parquet(
s3_path, index=False, engine="pyarrow"
).sample(frac=self.frac, random_state=42)
self.full_dataset["sale_date"] = self.full_dataset["sale_date"].astype(
"datetime64[ns]"
)
self.full_dataset["sid_shop_item_qty_sold_day"] = self.full_dataset[
"sid_shop_item_qty_sold_day"
].astype("int16")
for col in self.full_dataset:
if col.startswith("cat"):
self.full_dataset[col] = self.full_dataset[col].astype("int16")
logging.debug(
f"# of rows in full dataframe before removal of negative target values: {len(self.full_dataset)}"
)
self.full_dataset = self.full_dataset[
self.full_dataset.sid_shop_item_qty_sold_day >= 0
]
# call dataframe.set_index(), then repartition, then persist
# https://docs.dask.org/en/latest/generated/dask.dataframe.DataFrame.set_index.html
# set_index(sorted=False, npartitions='auto')
# df = df.repartition(npartitions=df.npartitions // 100)
# self.full_dataset = self.client.persist(self.full_dataset)
# _ = wait([self.full_dataset])
# https://docs.dask.org/en/latest/generated/dask.dataframe.DataFrame.repartition.html
# self.full_dataset = self.full_dataset.repartition(partition_size="100MB")
self.full_dataset = self.full_dataset.set_index(
"sale_date", sorted=False, npartitions="auto", partition_size=100_000_000,
)
# partition_size for set_index: int, optional, desired size of
            # each partition in bytes (to be used with npartitions='auto')
self.full_dataset = self.cull_empty_partitions(self.full_dataset)
self.full_dataset = self.client.persist(self.full_dataset)
_ = wait([self.full_dataset])
logging.debug(
f"# of rows in full dataframe after removal of negative target values: {len(self.full_dataset)}"
)
logging.debug(
f"Earliest and latest dates in full dataframe are : {dd.compute(self.full_dataset.index.min(), self.full_dataset.index.max())}"
)
logging.debug(
f"Data types of full Dask dataframe are: {self.full_dataset.dtypes}"
)
except Exception:
logging.exception(
"Exception occurred while creating Dask dataframe and persisting it on the cluster."
)
# kill all active work, delete all data on the network, and restart the worker processes.
self.client.restart()
sys.exit(1)
# finally:
# self.client.restart()
# sys.exit(1)
# https://stackoverflow.com/questions/58437182/how-to-read-a-single-large-parquet-file-into-multiple-partitions-using-dask-dask
# Parquet datasets can be saved into separate files.
# Each file may contain separate row groups.
# Dask Dataframe reads each Parquet row group into a separate partition.
# I DON'T WANT TO KEEP THE NUMPY ARRAY IN MEMORY, SO IT NEEDS TO BE
# DELETED AFTER DASK ARRAY IS CREATED
# MIGHT BE BETTER TO CREATE DASK ARRAY FROM FILE ON S3, TO AVOID
# HAVING BOTH NUMPY ARRAY AND PERSISTED DASK ARRAY IN MEMORY
# I ALSO WANT TO SPLIT THAT NUMPY ARRAY INTO MULTIPLE TRAIN AND VALIDATION
# SETS, SO WHAT'S THE BEST WAY TO DO THAT?
# SEND THE ENTIRE ARRAY TO THE CLUSTER AT ONCE - PROBABLY NOT, OR
# SEND TRAIN AND VALIDATION SETS ONE BY ONE AND DELETE?
# BUT THAT WILL REQUIRE SENDING DATA TO THE CLUSTER MULTIPLE TIMES -
# NOT IF THE DATA BEING SENT ARE DIFFERENT EACH TIME
# THEY ARE NOT GOING TO BE COMPLETELY DIFFERENT BECAUSE TRAIN DATA WILL
# JUST CONTINUE TO MERGE WITH VALIDATION SETS AND GROW
# CREATE FIRST DASK ARRAY AND SEND TO CLUSTER, THEN APPEND TO IT?
# IT DOES NOT LOOK LIKE DASK WOULD ALLOW THAT (SEE
# https://github.com/dask/distributed/issues/1676 -
# "You should also be aware that the task/data model underlying dask
# arrays is immutable. You should never try to modify memory in-place.")
# SO PROBABLY SEND ALL OF THE DATA TO THE CLUSTER AT THE BEGINNING,
# THEN TAKE CHUNKS OF IT FOR WALK-FORWARD VALIDATION
# PROBABLY SHOULD RELY ON LOADING DATA FROM FILE USING DELAYED /
# FROM_DELAYED
# SEE https://stackoverflow.com/questions/45941528/how-to-efficiently-send-a-large-numpy-array-to-the-cluster-with-dask-array)
# can I use a function to read multiple files into one Dask array?
# either figure out how to read multiple files (saved on S3) into one
# Dask array, or
# figure out how to save one array of PCA results to S3 (need disk space
# to save it locally before transfer to S3 and need a method that can
# handle transfer of more than 5GB - multipart transfer to S3)
# try to write PCA-transformed data directly to zarr array (stored in memory)
# then upload it to S3 (directly from memory)
# then create dask array from that zarr array in S3
# try to write PCA-transformed data to xarray then upload it to S3 as zarr
# save numpy array to parquet file, upload that file to S3 (using upload_file),
# then read that file into a Dask dataframe
# write data to parquet on S3 from pandas dataframe and append to it using awswrangler library?
# (https://github.com/awslabs/aws-data-wrangler/blob/main/tutorials/004%20-%20Parquet%20Datasets.ipynb)
# df = dd.read_parquet('s3://bucket/my-parquet-data')
# (https://docs.dask.org/en/latest/generated/dask.dataframe.read_parquet.html#dask.dataframe.read_parquet)
# from above link:
# engine argument: If ‘pyarrow’ or ‘pyarrow-dataset’ is specified, the ArrowDatasetEngine (which leverages the pyarrow.dataset API) will be used.
# read partitioned parquet dataset with Dask:
# https://stackoverflow.com/questions/67222212/read-partitioned-parquet-dataset-written-by-spark-using-dask-and-pyarrow-dataset
# def cast_types(self, df):
# df = df.copy()
# df['sale_date'] = df["sale_date"].astype(
# "datetime64[ns]"
# )
# for col in df:
# if col.startswith("cat") or (col == "sid_shop_item_qty_sold_day"):
# df[col] = df[col].astype("int16")
# return df
#
# def drop_neg_qty_sold(self, df):
# return df[df.sid_shop_item_qty_sold_day >= 0].copy()
# function from https://stackoverflow.com/questions/47812785/remove-empty-partitions-in-dask
def cull_empty_partitions(self, ddf):
ll = list(ddf.map_partitions(len).compute())
ddf_delayed = ddf.to_delayed()
ddf_delayed_new = list()
pempty = None
for ix, n in enumerate(ll):
if 0 == n:
pempty = ddf.get_partition(ix)
else:
ddf_delayed_new.append(ddf_delayed[ix])
if pempty is not None:
ddf = dd.from_delayed(ddf_delayed_new, meta=pempty)
return ddf
def gridsearch_wfv(self, params):
# self.hyperparameters = hyperparameters
# self.rmse_results = defaultdict(list) # replace this variable by creating a key-value in
# the self.hyper_dict dictionary with value containing list of RMSE values
self.all_params_combs = list()
# determine if there is more than one combination of hyperparameters
# if only one combination, set get_stats_ flag to True
self.get_stats_ = (
len(params[max(params, key=lambda x: len(params[x]))]) == 1
)
for params_comb_dict in (
dict(zip(params.keys(), v)) for v in list(product(*list(params.values())))
):
# for self.hyper_dict in hyperparameters:
# self.params_combs_list.append(params_comb_dict)
self.params_comb_dict = params_comb_dict.copy()
self.params_comb_dict["rmse_list_"] = list()
self.params_comb_dict["monthly_rmse_list_"] = list()
self.params_comb_dict["fit_times_list_"] = list()
try:
self.model = lgb.DaskLGBMRegressor(
client=self.client,
random_state=42,
silent=False,
tree_learner="data",
force_row_wise=True,
**params_comb_dict,
)
except Exception:
logging.exception("Exception occurred while initializing Dask model.")
# kill all active work, delete all data on the network, and restart the worker processes.
self.client.restart()
sys.exit(1)
# call method that loops over train-validation sets
with performance_report(filename=f"dask_report_{self.curr_dt_time}.html"):
for train, test, get_stats in self.train_test_time_split():
self.fit(train).predict(test).rmse_all_folds(test, get_stats)
self.params_comb_dict["avg_rmse_"] = mean(
self.params_comb_dict["rmse_list_"]
)
self.params_comb_dict["monthly_avg_rmse_"] = mean(
self.params_comb_dict["monthly_rmse_list_"]
)
self.all_params_combs.append(self.params_comb_dict)
best_params = min(self.all_params_combs, key=lambda x: x["monthly_avg_rmse_"])
self.best_score_ = best_params["monthly_avg_rmse_"]
# remove non-parameter key-values from self.best_params (i.e., rmse_list_ and avg_rmse_, etc.)
self.best_params_ = {k: v for k, v in best_params.items() if k in params}
# save list of parameter-result dictionaries to dataframe and then to CSV
if self.all_params_combs:
            all_params_combs_df = pd.DataFrame(self.all_params_combs)
import logging
import graphviz
import pandas as pd
from ..linx.data import ParquetData
from ..linx.ds import BayesianNetwork, \
ConditionalProbabilityTable as CPT, Query
from ..linx.infer import VariableElimination
from .conftest import (assert_approx_value_df, clean_tmp, create_df_medium,
create_prior_df, get_tmp_path)
def create_levels_df_mini():
collection = []
for narrative in range(5):
for viz in range(5):
for level in range(5):
true_level = min(
narrative,
viz,
)
if level == true_level:
value = 1.0
collection.append({
'Narrative': narrative,
'Visualization': viz,
'Level': level,
'value': value
})
    return pd.DataFrame(collection)
# -*- coding: utf-8 -*-
"""
This module contains all classes and functions specific to GLS processing.
"""
import pandas as pd
import numpy as np
import scipy
import scipy.sparse as sps
import scipy.sparse.linalg as spsl
import sandy
__author__ = "<NAME>"
__all__ = [
"gls_update",
"_y_calc",
"chi_individual",
"chi_diag",
"chi_square",
"ishikawa_factor",
"constrained_gls_update"
]
x_prior = [1, 2, 3]
y_extra = pd.Series([2, 3, 4], index=[1, 2, 3])
S = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
Vx_prior = [[0, 0, 0], [0, 3, 0], [0, 0, 8]]
Vy_extra = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
N_e = 1
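# NOTE: the module-level arrays above are toy inputs referenced by the doctest
# examples in the functions below; they are not used by the functions themselves.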
def gls_update(x_prior, S, Vx_prior, Vy_extra, y_extra, sparse=False,
threshold=None):
"""
    Perform GLS update for given variances, vectors and sensitivity.

    .. math::
        x_{post} = x_{prior} + V_{x_{prior}} \cdot S^T \cdot \left(S \cdot V_{x_{prior}} \cdot S^T + V_{y_{extra}}\right)^{-1} \cdot \left(y_{extra} - y_{calc}\right)
Parameters
----------
x_prior: 1D iterable
Vector in which we are going to apply GLS (MX1)
Vx_prior : 2D iterable
2D covariance matrix of x_prior (MXN).
Vy_extra : 2D iterable
2D covariance matrix for y_extra (MXN).
S : 2D iterable
2D sensitivity of the model y=f(x) (MXN).
y_extra : 1D iterable
1D extra info on output (NX1)
sparse : `bool`, optional
Option to use sparse matrix for calculations. The default is False.
threshold : `int`, optional
        Threshold to avoid numerical fluctuations. The default is None.
Returns
-------
`pd.Series`
GLS apply to a vector x_prior given S, Vx_prior, Vy_extra, y_calc
and y_extra.
Example
-------
>>> S = [[1, 2], [3, 4]]
>>> y = pd.Series([1, 1])
>>> Vx = sandy.CategoryCov.from_var([1, 1]).data
>>> Vy = pd.DataFrame([[1, 0], [0, 1]], index=[1, 2], columns=[1, 2])
>>> x = [1, 1]
>>> x_p = [2, 2]
>>> gls_update(y, S, Vx, Vy, x_p)
0 2.00000e-01
1 4.85714e-01
dtype: float64
>>> gls_update(y, S, Vx, Vy, x_p, sparse=True)
0 2.00000e-01
1 4.85714e-01
dtype: float64
"""
# Model calculus:
x_prior_ = pd.Series(x_prior)
S_ = pd.DataFrame(S).reindex(columns=x_prior_.index)
y_calc_ = _y_calc(x_prior, S, sparse=sparse)
y_extra_ = pd.Series(y_extra)
y_calc_ = y_calc_.reindex(y_extra_.index)
S_ = S_.reindex(index=y_extra_.index)
# Data in a appropriate format
delta = y_extra_ - y_calc_
Vx_prior_ = sandy.CategoryCov(Vx_prior)
# GLS update
A = Vx_prior_._gls_general_sensitivity(S_, Vy_extra, sparse=sparse,
threshold=threshold).values
if sparse:
A = sps.csr_matrix(A)
index = x_prior_.index
x_prior_ = x_prior_.values
x_post = x_prior_ + A.dot(delta)
x_post = pd.Series(x_post, index=index)
else:
x_post = x_prior_ + A.dot(delta)
return x_post
def _y_calc(x_prior, S, sparse=False):
"""
Perform model calculation in GLS.
Parameters
----------
x_prior: 1D iterable
Vector in which we are going to apply GLS (MX1).
S : 2D iterable
2D sensitivity of the model y=f(x) (MXN).
sparse : `bool`, optional
Option to use sparse matrix for calculations. The default is False.
Returns
-------
y_calc : `pd.Series`
1D calculated output using S.dot(x_prior), e.g. calculated CFY
Example
-------
S square matrix:
>>> _y_calc(x_prior, S)
0 1
1 2
2 3
dtype: int64
Different number of row and columns in S:
>>> S = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 1]]
>>> _y_calc(x_prior, S)
0 1
1 2
2 3
3 6
dtype: int64
>>> _y_calc(x_prior, S, sparse=True)
0 1
1 2
2 3
3 6
dtype: int64
"""
S_ = pd.DataFrame(S)
x_prior_ = pd.Series(x_prior).reindex(S_.columns).fillna(0)
if sparse:
index = S_.index
S_ = sps.csr_matrix(S_.values)
y_calc = S_.dot(x_prior_.values)
y_calc = pd.Series(y_calc, index=index)
else:
y_calc = S_.dot(x_prior_)
return y_calc
def chi_individual(x_prior, S, Vx_prior, Vy_extra, y_extra, sparse=False):
"""
Function to calculate individual chi-value measured in sigmas according to
https://www.oecd-nea.org/jcms/pl_19760/intermediate-report-on-methods-and-approaches-to-provide-feedback-from-nuclear-and-covariance-data-adjustment-for-improvement-of-nuclear-data-files
(page 9, equation (4.2))
Parameters
----------
x_prior: 1D iterable
Vector in which we are going to apply GLS (MX1)
S : 2D iterable
2D sensitivity of the model y=f(x) (MXN).
Vx_prior : 2D iterable
2D covariance matrix of x_prior (MXN).
Vy_extra : 2D iterable
2D covariance matrix for y_extra (MXN).
y_extra : 1D iterable
1D extra info on output (NX1).
sparse : `bool`, optional
Option to use sparse matrix for calculations. The default is False.
Returns
-------
`pd.Series`
individual chi-value measured in sigmas.
Example
-------
>>> chi_individual(x_prior, S, Vx_prior, Vy_extra, y_extra)
1 1.00000e+00
2 5.00000e-01
3 3.33333e-01
dtype: float64
>>> chi_individual(x_prior, S, Vx_prior, Vy_extra, y_extra, sparse=True)
1 1.00000e+00
2 5.00000e-01
3 3.33333e-01
dtype: float64
"""
Vx_prior_ = sandy.CategoryCov(Vx_prior)
G = Vx_prior_._gls_G(S, Vy_extra, sparse=sparse)
G = np.sqrt(np.diag(G))
y_calc_ = _y_calc(x_prior, S, sparse=sparse).values
y_extra_ = pd.Series(y_extra)
delta = np.abs(y_extra_.values - y_calc_)
return pd.Series(delta / G, index=y_extra_.index)
def chi_diag(x_prior, S, Vx_prior, Vy_extra, y_extra, sparse=False):
"""
    Function to calculate diagonal chi-value measured in sigmas (relevant when
    the individual chi-values satisfy $\chi_{ind,i} \gg 1$) according to
https://www.oecd-nea.org/jcms/pl_19760/intermediate-report-on-methods-and-approaches-to-provide-feedback-from-nuclear-and-covariance-data-adjustment-for-improvement-of-nuclear-data-files
(page 9, equation (4.3))
Parameters
----------
x_prior: 1D iterable
Vector in which we are going to apply GLS (MX1)
S : 2D iterable
2D sensitivity of the model y=f(x) (MXN).
Vx_prior : 2D iterable
2D covariance matrix of x_prior (MXN).
Vy_extra : 2D iterable
2D covariance matrix for y_extra (MXN).
y_extra : 1D iterable
1D extra info on output (NX1)
sparse : `bool`, optional
Option to use sparse matrix for calculations. The default is False.
Returns
-------
`pd.Series`
diagonal chi-value measured in sigmas $\chi_{ind,i}$>>1
Example
-------
>>> chi_diag(x_prior, S, Vx_prior, Vy_extra, y_extra)
1 1.00000e+00
2 2.00000e+00
3 3.00000e+00
dtype: float64
>>> chi_diag(x_prior, S, Vx_prior, Vy_extra, y_extra, sparse=True)
1 1.00000e+00
2 2.00000e+00
3 3.00000e+00
dtype: float64
"""
Vx_prior_ = sandy.CategoryCov(Vx_prior)
G_inv = Vx_prior_._gls_G_inv(S, Vy_extra, sparse=sparse).values
G_inv = np.sqrt(np.diag(G_inv))
y_calc_ = _y_calc(x_prior, S, sparse=sparse).values
y_extra_ = pd.Series(y_extra)
delta = np.abs(y_extra_.values - y_calc_)
return pd.Series(delta / G_inv, index=y_extra_.index)
def chi_square(x_prior, S, Vx_prior, Vy_extra, y_extra, N_e, sparse=False):
"""
Function to calculate contribution to chi-square value according to
https://www.oecd-nea.org/jcms/pl_19760/intermediate-report-on-methods-and-approaches-to-provide-feedback-from-nuclear-and-covariance-data-adjustment-for-improvement-of-nuclear-data-files
(page 10, equation (4.4))
Parameters
----------
x_prior: 1D iterable
Vector in which we are going to apply GLS (MX1)
S : 2D iterable
2D sensitivity of the model y=f(x) (MXN).
Vx_prior : 2D iterable
2D covariance matrix of x_prior (MXN).
Vy_extra : 2D iterable
2D covariance matrix for y_extra (MXN).
y_extra : 1D iterable
1D extra info on output (NX1)
N_e : `int`
Number of experimental values used in adjustment.
sparse : `bool`, optional
Option to use sparse matrix for calculations. The default is False.
Returns
-------
`pd.Series`
contribution to chi-square value
Example
-------
>>> chi_square(x_prior, S, Vx_prior, Vy_extra, y_extra, N_e)
1 1.00000e+00
2 2.50000e-01
3 1.11111e-01
dtype: float64
>>> chi_square(x_prior, S, Vx_prior, Vy_extra, y_extra, N_e, sparse=True)
1 1.00000e+00
2 2.50000e-01
3 1.11111e-01
dtype: float64
"""
Vx_prior_ = sandy.CategoryCov(Vx_prior)
G_inv = Vx_prior_._gls_G_inv(S, Vy_extra, sparse=sparse).values
y_calc_ = _y_calc(x_prior, S, sparse=sparse).values
y_extra_ = pd.Series(y_extra)
delta = y_extra_.values - y_calc_
chi_square = delta.T.dot(G_inv) * delta / N_e
return pd.Series(chi_square, index=y_extra_.index)
def ishikawa_factor(S, Vx_prior, Vy_extra, sparse=False):
"""
Function to obtain Ishikawa factor according to
https://www.oecd-nea.org/jcms/pl_19760/intermediate-report-on-methods-and-approaches-to-provide-feedback-from-nuclear-and-covariance-data-adjustment-for-improvement-of-nuclear-data-files
(page 10, equation (4.5))
Parameters
----------
S : 2D iterable
2D sensitivity of the model y=f(x) (MXN).
Vx_prior : 2D iterable
        2D covariance matrix of x_prior (MXM).
Vy_extra : 2D iterable
        2D covariance matrix for y_extra (NXN).
sparse : `bool`, optional
Option to use sparse matrix for calculations. The default is False.
Returns
-------
`pd.Series`
Ishikawa factor.
Example
-------
>>> ishikawa_factor(S, Vx_prior, Vy_extra)
0 0.00000e+00
1 3.00000e+00
2 8.00000e+00
dtype: float64
>>> ishikawa_factor(S, Vx_prior, Vy_extra, sparse=True)
0 0.00000e+00
1 3.00000e+00
2 8.00000e+00
dtype: float64
"""
Vx_prior_ = sandy.CategoryCov(Vx_prior)
Vy_calc = Vx_prior_._gls_Vy_calc(S, sparse=sparse)
Vy_values = np.diag(Vy_calc)
Vy_extra_ = np.diag( | pd.DataFrame(Vy_extra) | pandas.DataFrame |
import numpy as np
"""
This Monte Carlo algorithm approximates the "true" value of the parameter(s)
of interest using a random walk of normally distributed steps. The step mean
starts at 0 and is set to the last accepted step for the parameter after each acceptance.
"""
truth=5
tss = []
for j in range(50):
ts = []
stepsizes = [.01,.05,.1,.5,1,5,10]
index=0
while len(ts) < len(stepsizes):
w0 = 0
score1 = abs(truth-w0)
score=score1
delta = 0
t = 0
u = 0
stepsize=stepsizes[index]
while (score1 > .5)&(t<1000):
w1 = w0+np.random.normal(delta,stepsize)
score2 = abs(truth-w1)
if -score2>-score1:
delta = w1-w0
w0 = w1
score1=score2
u+=1
t+=1
print(t,score1,u)
if score1 <=.5:
ts.append(t)
index+=1
tss.append(ts)
tss=np.array(tss)
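# pick the step size with the smallest mean number of iterations needed to get
# within 0.5 of the truth, averaged over the 50 repeats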
stepsize = stepsizes[np.argmin(np.mean(tss,axis=0))]
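# re-run a single walk with the selected step size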
truth = 5
w0 = 0
score1 = abs(truth-w0)
score=score1
delta = 0
t = 0
u = 0
while (score1 > .5)&(t<1000):
w1 = w0+np.random.normal(delta,stepsize)
score2 = abs(truth-w1)
if -score2>-score1:
delta = w1-w0
w0 = w1
score1=score2
u+=1
t+=1
print(t,score1,u)
import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
dat = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/00519/heart_failure_clinical_records_dataset.csv")
pd.set_option("display.max_columns",500)
dat.tail()
covars = ['age','anaemia','creatinine_phosphokinase',
'diabetes','ejection_fraction','high_blood_pressure',
'platelets','serum_creatinine','serum_sodium',
'sex','smoking','time']
X=dat[covars].copy()
Y=dat['DEATH_EVENT']
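# Y is 0/1, so the odds Y/(1-Y) are 0 or inf; replace inf with 1e16 and 0 with
# 1e-16 so the log-odds are finite (about +/-36.8) before fitting OLS on them.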
Yodds = Y/(1-Y)
Yodds = np.where(Yodds==np.inf,1e16,1e-16)
Ylogodds = np.log(Yodds)
X=(X-np.mean(X,axis=0))/np.std(X,axis=0)
X['int']=1
random.seed(42)
index = np.array(random.choices([1,2,3,4,5],k=len(X)))
xv = X[index==5].copy()
yv = Ylogodds[index==5].copy()
xt = X[index!=5].copy()
yt = Ylogodds[index!=5].copy()
coefs = np.linalg.pinv(xt.T@xt)@(xt.T@yt)
predtlogodds = xt@coefs
predvlogodds = xv@coefs
predt=np.exp(predtlogodds)/(1+np.exp(predtlogodds))
predt=np.where(predt>.5,1,0)
predv=np.exp(predvlogodds)/(1+np.exp(predvlogodds))
predv=np.where(predv>.5,1,0)
act_t = np.exp(yt)/(1+np.exp(yt))
act_t=np.where(act_t>.5,1,0)
act_v = np.exp(yv)/(1+np.exp(yv))
act_v=np.where(act_v>.5,1,0)
logregt_acc=sum(np.where(predt==act_t,1,0))/len(predt)
logregv_acc = sum(np.where(predv==act_v,1,0))/len(predv)
print("logreg training acc:",logregt_acc,"val acc:",logregv_acc)
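# Cross-check: sklearn's L2-regularized logistic regression on the same split,
# for comparison with the closed-form least-squares fit on the clipped log-odds.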
from sklearn.linear_model import LogisticRegression
xv = X[index==5].copy()
yv = Y[index==5].copy()
xt = X[index!=5].copy()
yt = Y[index!=5].copy()
lr = LogisticRegression(fit_intercept=False,solver = 'newton-cg',penalty='l2')
lr.fit(xt,yt)
sum(np.where(lr.predict(xt)==yt,1,0))/len(yt)
sum(np.where(lr.predict(xv)==yv,1,0))/len(yv)
#BASE KNN for the recall experiment (note: the k-scan below is scored on cross-validated precision)
from sklearn.neighbors import KNeighborsClassifier
X=dat[covars].copy()
Y=dat['DEATH_EVENT']
X=(X-np.mean(X,axis=0))/np.std(X,axis=0)
random.seed(42)
index = np.array(random.choices([1,2,3,4,5,6],k=len(X)))
acc = []
for i in list(range(2,30)):
avgscore=[]
for t in [1,2,3,4,5]:
xv = X[index==t].copy()
yv = Y[index==t].copy()
xt = X[~pd.Series(index).isin([t,6])].copy()
yt = Y[~pd.Series(index).isin([t,6])].copy()
knn = KNeighborsClassifier(n_neighbors=i,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt,yt)
tp=sum(np.where((knn.predict(xv)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = precision
avgscore.append(score)
acc.append(np.mean(avgscore))
plt.plot(acc)
plt.xticks(list(range(28)),list(range(2,30)))
plt.show()
#k=18
k=4
k=16
def model_precision(X,Y,w,k):
random.seed(42)
index = np.array(random.choices([1,2,3,4,5,6],k=len(X)))
initscores=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[pd.Series(index).isin([val])].copy()
xt = X[~pd.Series(index).isin([val,6])].copy()
yt = Y[~pd.Series(index).isin([val,6])].copy()
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*w,yt)
tp=sum(np.where((knn.predict(xv*w)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*w)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*w)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*w)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = precision
initscores.append(score)
score=np.mean(initscores)
return score
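# Hedged usage sketch (names reused from the surrounding notebook code): score
# the unweighted baseline with the k chosen above.
baseline_precision = model_precision(X, Y, np.ones(len(covars)), k)
print("unweighted 5-fold CV precision:", baseline_precision)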
def model_recall(X,Y,w,k):
random.seed(42)
index = np.array(random.choices([1,2,3,4,5,6],k=len(X)))
initscores=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[pd.Series(index).isin([val])].copy()
xt = X[~pd.Series(index).isin([val,6])].copy()
yt = Y[~pd.Series(index).isin([val,6])].copy()
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*w,yt)
tp=sum(np.where((knn.predict(xv*w)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*w)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*w)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*w)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = recall
initscores.append(score)
score=np.mean(initscores)
return score
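# sequential_MCMC below repeats a greedy random-walk search over the feature
# weights w: starting from w = 1 it proposes normally distributed steps (whose
# mean "delta" is reused after each accepted step), accepts a proposal only if
# the cross-validated score from model_fn improves, abandons a restart after
# no_update_limit consecutive rejections, and keeps a weight vector only if its
# final score beats the unweighted baseline, until `draws` such vectors are collected.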
def sequential_MCMC(X,Y,model_fn,draws=30,no_update_limit=120,
stepsize=.1,step_shrinkage=.9,
delta_reset=20,):
#INITIAL SCORE
w0 = np.ones(len(X.columns.values))
score = model_fn(X,Y,w0,k)
scoreinit=score
wfin = []
scores = []
while len(wfin)<draws:
noupdate=0
deltachosen=False
stepsize=stepsize
score=scoreinit
delta=np.random.normal(0,stepsize/2,len(covars))
w0=np.ones(len(X.columns.values))
while noupdate<no_update_limit:
w1 = w0+np.random.normal(delta,stepsize,len(X.columns.values))
score2 = model_fn(X,Y,w1,k)
if score2>score:
print(score2,score,"accepted",noupdate)
                deltachosen = True
score=score2
delta = w1-w0
w0=w1
noupdate=0
else:
#print(score2,score)
noupdate+=1
if deltachosen==False:
delta=np.random.normal(0,stepsize/2,len(X.columns.values))
                if noupdate % delta_reset == 0:
deltachosen=False
stepsize=stepsize*step_shrinkage
delta=np.random.normal(0,stepsize/2,len(X.columns.values))
if score>scoreinit:
wfin.append(w0)
scores.append(score)
wfin_arr=np.vstack(wfin)
return(wfin_arr,scores)
wfin_arr,scores=sequential_MCMC(X,Y,model_fn=model_precision,draws=30,no_update_limit=120,
stepsize=.1,step_shrinkage=.9,
delta_reset=20)
print(np.mean(wfin_arr,axis=0))
print(np.std(wfin_arr,axis=0))
for i in range(12):
plt.hist(wfin_arr.T[i],bins=10)
plt.title(covars[i])
plt.show()
method=np.median
xv = X[pd.Series(index).isin([6])].copy()
yv = Y[pd.Series(index).isin([6])].copy()
xt = X[pd.Series(index).isin([1,2,3,4,5])].copy()
yt = Y[pd.Series(index).isin([1,2,3,4,5])].copy()
wf=method(wfin_arr,axis=0)
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*wf,yt)
tp=sum(np.where((knn.predict(xv*wf)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*wf)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*wf)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*wf)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt,yt)
tp=sum(np.where((knn.predict(xv)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
wfin_arr,scores=sequential_MCMC(X,Y,model_fn=model_recall,draws=30,no_update_limit=120,
stepsize=.1,step_shrinkage=.9,
delta_reset=20)
# recompute the combined weights from the recall-optimized draws
wf = method(wfin_arr, axis=0)
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*wf,yt)
tp=sum(np.where((knn.predict(xv*wf)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*wf)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*wf)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*wf)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt,yt)
tp=sum(np.where((knn.predict(xv)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
initscores=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[pd.Series(index).isin([val])].copy()
xt = X[~pd.Series(index).isin([val,6])].copy()
yt = Y[~pd.Series(index).isin([val,6])].copy()
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
stepsize=.1
w0=np.ones(len(covars))
delta=np.random.normal(0,stepsize/2,len(covars))
knn.fit(xt*w0,yt)
tp=sum(np.where((knn.predict(xv*w0)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*w0)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*w0)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*w0)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = recall
initscores.append(score)
score=np.mean(initscores)
scoreinit=score
#sum(np.where(knn.predict(xv*w0)==yv,1,0))/len(yv)
#sum(np.where(knn.predict(xt*w0)==yt,1,0))/len(yt)
wfin=[]
scores = []
while len(wfin)<30:
noupdate=0
deltachosen=False
score=scoreinit
stepsize=.1
delta=np.random.normal(0,stepsize/2,len(covars))
w0=np.ones(len(covars))
#iteration=0
while noupdate<120:
#iteration+=1
#val = iteration%4+1
score2list=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[pd.Series(index).isin([val])].copy()
xt = X[~pd.Series(index).isin([val,6])].copy()
yt = Y[~pd.Series(index).isin([val,6])].copy()
w1 = w0+np.random.normal(delta,stepsize,len(covars))
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*w1,yt)
tp=sum(np.where((knn.predict(xv*w1)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*w1)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*w1)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*w1)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score2 = sum(np.where(knn.predict(xv*w1)==yv,1,0))/len(yv)
score2 = recall
score2list.append(score2)
score2=np.mean(score2list)
if score2>score:
print(score2,score,"accepted",noupdate)
            deltachosen = True
score=score2
delta = w1-w0
w0=w1
noupdate=0
else:
#print(score2,score)
noupdate+=1
if deltachosen==False:
delta=np.random.normal(0,stepsize/2,len(covars))
            if noupdate % 20 == 0:
deltachosen=False
stepsize=stepsize*.9
delta=np.random.normal(0,stepsize/2,len(covars))
if score>scoreinit:
wfin.append(w0)
scores.append(score)
wfin_arr=np.vstack(wfin)
print(np.mean(wfin_arr,axis=0))
print(np.std(wfin_arr,axis=0))
for i in range(12):
plt.hist(wfin_arr.T[i],bins=10)
plt.title(covars[i])
plt.show()
method=np.mean
xv = X[pd.Series(index).isin([6])].copy()
yv = Y[pd.Series(index).isin([6])].copy()
xt = X[pd.Series(index).isin([1,2,3,4,5])].copy()
yt = Y[pd.Series(index).isin([1,2,3,4,5])].copy()
wf=method(wfin_arr,axis=0)
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*wf,yt)
tp=sum(np.where((knn.predict(xv*wf)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*wf)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*wf)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*wf)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt,yt)
tp=sum(np.where((knn.predict(xv)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
scores_ordered = sorted(range(len(scores)), key=lambda i: scores[i])
wfin_sorted = wfin_arr[scores_ordered]
wfin_selected = wfin_sorted[15:]
wf_sort=method(wfin_selected,axis=0)
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*wf_sort,yt)
tp=sum(np.where((knn.predict(xv*wf_sort)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*wf_sort)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*wf_sort)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*wf_sort)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
#BASE KNN Maximizing Precision
from sklearn.neighbors import KNeighborsClassifier
import warnings
warnings.filterwarnings("ignore")
X=dat[covars].copy()
Y=dat['DEATH_EVENT']
X=(X-np.mean(X,axis=0))/np.std(X,axis=0)
random.seed(42)
index = np.array(random.choices([1,2,3,4,5,6],k=len(X)))
acc = []
for i in list(range(2,30)):
avgscore=[]
for t in [1,2,3,4,5]:
xv = X[index==t].copy()
yv = Y[index==t].copy()
xt = X[~pd.Series(index).isin([t,6])].copy()
yt = Y[~pd.Series(index).isin([t,6])].copy()
knn = KNeighborsClassifier(n_neighbors=i,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt,yt)
tp=sum(np.where((knn.predict(xv)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = precision
avgscore.append(score)
acc.append(np.mean(avgscore))
plt.plot(acc)
plt.xticks(list(range(28)),list(range(2,30)))
plt.show()
#k=18
k=17
initscores=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[pd.Series(index).isin([val])].copy()
xt = X[~ | pd.Series(index) | pandas.Series |
# -*- coding: utf-8 -*-
import copy
import datetime
from unittest import mock
import numpy
import pandas
from pandas import DataFrame
import pkg_resources
import pytest
from pandas_gbq import gbq
pytestmark = pytest.mark.filterwarnings(
"ignore:credentials from Google Cloud SDK"
)
pandas_installed_version = pkg_resources.get_distribution(
"pandas"
).parsed_version
def _make_connector(project_id="some-project", **kwargs):
return gbq.GbqConnector(project_id, **kwargs)
@pytest.fixture
def min_bq_version():
import pkg_resources
return pkg_resources.parse_version("1.11.0")
def mock_get_credentials_no_project(*args, **kwargs):
import google.auth.credentials
mock_credentials = mock.create_autospec(
google.auth.credentials.Credentials
)
return mock_credentials, None
def mock_get_credentials(*args, **kwargs):
import google.auth.credentials
mock_credentials = mock.create_autospec(
google.auth.credentials.Credentials
)
return mock_credentials, "default-project"
@pytest.fixture
def mock_service_account_credentials():
import google.oauth2.service_account
mock_credentials = mock.create_autospec(
google.oauth2.service_account.Credentials
)
return mock_credentials
@pytest.fixture
def mock_compute_engine_credentials():
import google.auth.compute_engine
mock_credentials = mock.create_autospec(
google.auth.compute_engine.Credentials
)
return mock_credentials
@pytest.fixture(autouse=True)
def no_auth(monkeypatch):
import pydata_google_auth
monkeypatch.setattr(pydata_google_auth, "default", mock_get_credentials)
@pytest.mark.parametrize(
("type_", "expected"),
[
("INTEGER", None), # Can't handle NULL
("BOOLEAN", None), # Can't handle NULL
("FLOAT", numpy.dtype(float)),
# TIMESTAMP will be localized after DataFrame construction.
("TIMESTAMP", "datetime64[ns]"),
("DATETIME", "datetime64[ns]"),
],
)
def test__bqschema_to_nullsafe_dtypes(type_, expected):
result = gbq._bqschema_to_nullsafe_dtypes(
[dict(name="x", type=type_, mode="NULLABLE")]
)
if not expected:
assert result == {}
else:
assert result == {"x": expected}
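# In words: BigQuery FLOAT becomes float64 and TIMESTAMP/DATETIME become
# datetime64[ns], while INTEGER and BOOLEAN get no forced dtype so that NULL
# values remain representable in the resulting DataFrame.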
def test_GbqConnector_get_client_w_old_bq(monkeypatch, mock_bigquery_client):
gbq._test_google_api_imports()
connector = _make_connector()
monkeypatch.setattr(gbq, "HAS_CLIENT_INFO", False)
connector.get_client()
# No client_info argument.
mock_bigquery_client.assert_called_with(
credentials=mock.ANY, project=mock.ANY
)
def test_GbqConnector_get_client_w_new_bq(mock_bigquery_client):
gbq._test_google_api_imports()
pytest.importorskip(
"google.cloud.bigquery", minversion=gbq.BIGQUERY_CLIENT_INFO_VERSION
)
pytest.importorskip("google.api_core.client_info")
connector = _make_connector()
connector.get_client()
_, kwargs = mock_bigquery_client.call_args
assert kwargs["client_info"].user_agent == "pandas-{}".format(
pandas.__version__
)
def test_to_gbq_should_fail_if_invalid_table_name_passed():
with pytest.raises(gbq.NotFoundException):
gbq.to_gbq(DataFrame([[1]]), "invalid_table_name", project_id="1234")
def test_to_gbq_with_no_project_id_given_should_fail(monkeypatch):
import pydata_google_auth
monkeypatch.setattr(
pydata_google_auth, "default", mock_get_credentials_no_project
)
with pytest.raises(ValueError, match="Could not determine project ID"):
gbq.to_gbq(DataFrame([[1]]), "dataset.tablename")
def test_to_gbq_with_verbose_new_pandas_warns_deprecation(min_bq_version):
import pkg_resources
pandas_version = pkg_resources.parse_version("0.23.0")
with pytest.warns(FutureWarning), mock.patch(
"pkg_resources.Distribution.parsed_version",
new_callable=mock.PropertyMock,
) as mock_version:
mock_version.side_effect = [min_bq_version, pandas_version]
try:
gbq.to_gbq(
DataFrame([[1]]),
"dataset.tablename",
project_id="my-project",
verbose=True,
)
except gbq.TableCreationError:
pass
def test_to_gbq_with_not_verbose_new_pandas_warns_deprecation(min_bq_version):
import pkg_resources
pandas_version = pkg_resources.parse_version("0.23.0")
with pytest.warns(FutureWarning), mock.patch(
"pkg_resources.Distribution.parsed_version",
new_callable=mock.PropertyMock,
) as mock_version:
mock_version.side_effect = [min_bq_version, pandas_version]
try:
gbq.to_gbq(
DataFrame([[1]]),
"dataset.tablename",
project_id="my-project",
verbose=False,
)
except gbq.TableCreationError:
pass
def test_to_gbq_wo_verbose_w_new_pandas_no_warnings(recwarn, min_bq_version):
import pkg_resources
pandas_version = pkg_resources.parse_version("0.23.0")
with mock.patch(
"pkg_resources.Distribution.parsed_version",
new_callable=mock.PropertyMock,
) as mock_version:
mock_version.side_effect = [min_bq_version, pandas_version]
try:
gbq.to_gbq(
DataFrame([[1]]), "dataset.tablename", project_id="my-project"
)
except gbq.TableCreationError:
pass
assert len(recwarn) == 0
def test_to_gbq_with_verbose_old_pandas_no_warnings(recwarn, min_bq_version):
import pkg_resources
pandas_version = pkg_resources.parse_version("0.22.0")
with mock.patch(
"pkg_resources.Distribution.parsed_version",
new_callable=mock.PropertyMock,
) as mock_version:
mock_version.side_effect = [min_bq_version, pandas_version]
try:
gbq.to_gbq(
DataFrame([[1]]),
"dataset.tablename",
project_id="my-project",
verbose=True,
)
except gbq.TableCreationError:
pass
assert len(recwarn) == 0
def test_to_gbq_with_private_key_raises_notimplementederror():
with pytest.raises(NotImplementedError, match="private_key"):
gbq.to_gbq(
DataFrame([[1]]),
"dataset.tablename",
project_id="my-project",
private_key="path/to/key.json",
)
def test_to_gbq_doesnt_run_query(
recwarn, mock_bigquery_client, min_bq_version
):
try:
gbq.to_gbq(
DataFrame([[1]]), "dataset.tablename", project_id="my-project"
)
except gbq.TableCreationError:
pass
mock_bigquery_client.query.assert_not_called()
def test_to_gbq_w_empty_df(mock_bigquery_client):
import google.api_core.exceptions
mock_bigquery_client.get_table.side_effect = google.api_core.exceptions.NotFound(
"my_table"
)
gbq.to_gbq(DataFrame(), "my_dataset.my_table", project_id="1234")
mock_bigquery_client.create_table.assert_called_with(mock.ANY)
mock_bigquery_client.load_table_from_dataframe.assert_not_called()
mock_bigquery_client.load_table_from_file.assert_not_called()
def test_to_gbq_creates_dataset(mock_bigquery_client):
import google.api_core.exceptions
mock_bigquery_client.get_table.side_effect = google.api_core.exceptions.NotFound(
"my_table"
)
mock_bigquery_client.get_dataset.side_effect = google.api_core.exceptions.NotFound(
"my_dataset"
)
gbq.to_gbq(DataFrame([[1]]), "my_dataset.my_table", project_id="1234")
mock_bigquery_client.create_dataset.assert_called_with(mock.ANY)
def test_read_gbq_with_no_project_id_given_should_fail(monkeypatch):
import pydata_google_auth
monkeypatch.setattr(
pydata_google_auth, "default", mock_get_credentials_no_project
)
with pytest.raises(ValueError, match="Could not determine project ID"):
gbq.read_gbq("SELECT 1", dialect="standard")
def test_read_gbq_with_inferred_project_id(monkeypatch):
df = gbq.read_gbq("SELECT 1", dialect="standard")
assert df is not None
def test_read_gbq_with_inferred_project_id_from_service_account_credentials(
mock_bigquery_client, mock_service_account_credentials
):
mock_service_account_credentials.project_id = "service_account_project_id"
df = gbq.read_gbq(
"SELECT 1",
dialect="standard",
credentials=mock_service_account_credentials,
)
assert df is not None
mock_bigquery_client.query.assert_called_once_with(
"SELECT 1",
job_config=mock.ANY,
location=None,
project="service_account_project_id",
)
def test_read_gbq_without_inferred_project_id_from_compute_engine_credentials(
mock_compute_engine_credentials,
):
with pytest.raises(ValueError, match="Could not determine project ID"):
gbq.read_gbq(
"SELECT 1",
dialect="standard",
credentials=mock_compute_engine_credentials,
)
def test_read_gbq_with_max_results_zero(monkeypatch):
df = gbq.read_gbq("SELECT 1", dialect="standard", max_results=0)
assert df is None
def test_read_gbq_with_max_results_ten(monkeypatch, mock_bigquery_client):
df = gbq.read_gbq("SELECT 1", dialect="standard", max_results=10)
assert df is not None
mock_bigquery_client.list_rows.assert_called_with(mock.ANY, max_results=10)
def test_read_gbq_with_verbose_new_pandas_warns_deprecation(min_bq_version):
import pkg_resources
pandas_version = pkg_resources.parse_version("0.23.0")
with pytest.warns(FutureWarning), mock.patch(
"pkg_resources.Distribution.parsed_version",
new_callable=mock.PropertyMock,
) as mock_version:
mock_version.side_effect = [min_bq_version, pandas_version]
gbq.read_gbq("SELECT 1", project_id="my-project", verbose=True)
def test_read_gbq_with_not_verbose_new_pandas_warns_deprecation(
min_bq_version,
):
import pkg_resources
pandas_version = pkg_resources.parse_version("0.23.0")
with pytest.warns(FutureWarning), mock.patch(
"pkg_resources.Distribution.parsed_version",
new_callable=mock.PropertyMock,
) as mock_version:
mock_version.side_effect = [min_bq_version, pandas_version]
gbq.read_gbq("SELECT 1", project_id="my-project", verbose=False)
def test_read_gbq_wo_verbose_w_new_pandas_no_warnings(recwarn, min_bq_version):
import pkg_resources
pandas_version = pkg_resources.parse_version("0.23.0")
with mock.patch(
"pkg_resources.Distribution.parsed_version",
new_callable=mock.PropertyMock,
) as mock_version:
mock_version.side_effect = [min_bq_version, pandas_version]
gbq.read_gbq("SELECT 1", project_id="my-project", dialect="standard")
assert len(recwarn) == 0
def test_read_gbq_with_old_bq_raises_importerror():
import pkg_resources
bigquery_version = pkg_resources.parse_version("0.27.0")
with pytest.raises(ImportError, match="google-cloud-bigquery"), mock.patch(
"pkg_resources.Distribution.parsed_version",
new_callable=mock.PropertyMock,
) as mock_version:
mock_version.side_effect = [bigquery_version]
gbq.read_gbq(
"SELECT 1", project_id="my-project",
)
def test_read_gbq_with_verbose_old_pandas_no_warnings(recwarn, min_bq_version):
import pkg_resources
pandas_version = pkg_resources.parse_version("0.22.0")
with mock.patch(
"pkg_resources.Distribution.parsed_version",
new_callable=mock.PropertyMock,
) as mock_version:
mock_version.side_effect = [min_bq_version, pandas_version]
gbq.read_gbq(
"SELECT 1",
project_id="my-project",
dialect="standard",
verbose=True,
)
assert len(recwarn) == 0
def test_read_gbq_with_private_raises_notimplmentederror():
with pytest.raises(NotImplementedError, match="private_key"):
gbq.read_gbq(
"SELECT 1", project_id="my-project", private_key="path/to/key.json"
)
def test_read_gbq_with_invalid_dialect():
with pytest.raises(ValueError, match="is not valid for dialect"):
gbq.read_gbq("SELECT 1", dialect="invalid")
def test_read_gbq_with_configuration_query():
df = gbq.read_gbq(None, configuration={"query": {"query": "SELECT 2"}})
assert df is not None
def test_read_gbq_with_configuration_duplicate_query_raises_error():
with pytest.raises(
ValueError, match="Query statement can't be specified inside config"
):
gbq.read_gbq(
"SELECT 1", configuration={"query": {"query": "SELECT 2"}}
)
def test_generate_bq_schema_deprecated():
# 11121 Deprecation of generate_bq_schema
with pytest.warns(FutureWarning):
df = | DataFrame([[1, "two"], [3, "four"]]) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
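        # comparing an object-dtype (string) Series with a boolean Series is
        # element-wise, and nothing matches here, so both orderings are all-False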
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
# -*- coding: utf-8 -*-
"""Input Output (IO) helpers.
.. plot::
:context: reset
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['axes.grid'] = True
import spaudiopy as spa
"""
import os
from warnings import warn
import multiprocessing
import json
from datetime import datetime
import numpy as np
import pandas as pd
from scipy.io import loadmat, savemat
import h5py
import soundfile as sf
from . import utils, sig, decoder, sdm, grids, sph, process, __version__
def load_audio(filenames, fs=None):
"""Load mono and multichannel audio from files.
Parameters
----------
filenames : string or list of strings
Audio files.
Returns
-------
sig : sig.MonoSignal or sig.MultiSignal
Audio signal.
"""
loaded_data = []
loaded_fs = []
# pack in list if only a single string
if not isinstance(filenames, (list, tuple)):
filenames = [filenames]
for file in filenames:
data, fs_file = sf.read(file)
if data.ndim != 1:
# detect and split interleaved wav
for c in data.T:
loaded_data.append(c)
else:
loaded_data.append(data)
loaded_fs.append(fs_file)
# Assert same sample rate for all channels
assert all(x == loaded_fs[0] for x in loaded_fs)
# Check against provided samplerate
if fs is not None:
if fs != loaded_fs[0]:
raise ValueError("File: Found different fs:" + str(loaded_fs[0]))
else:
fs = loaded_fs[0]
# MonoSignal or MultiSignal
if len(loaded_data) == 1:
return sig.MonoSignal(loaded_data, fs=fs)
else:
return sig.MultiSignal([*loaded_data], fs=fs)
def save_audio(signal, filename, fs=None, subtype='FLOAT'):
"""Save signal to audio file.
Parameters
----------
    signal : sig.MonoSignal, sig.MultiSignal or np.ndarray
Audio Signal, forwarded to sf.write(); (frames x channels).
filename : string
Audio file name.
fs : int
fs(t).
    subtype : str, optional
        Audio subtype forwarded to sf.write(). The default is 'FLOAT'.
"""
# assert(isinstance(signal, (sig.MonoSignal, sig.MultiSignal)))
    if isinstance(signal, sig.MonoSignal):
if fs is not None:
assert(signal.fs == fs)
if type(signal) == sig.MonoSignal:
data = signal.signal
data_fs = signal.fs
elif type(signal) in (sig.MultiSignal, sig.AmbiBSignal):
data = signal.get_signals().T
data_fs = signal.fs
elif isinstance(signal, (np.ndarray, np.generic)):
data = signal
data_fs = fs
else:
raise NotImplementedError('Data type not supported.')
sf.write(filename, data, data_fs, subtype=subtype)
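# Hedged usage sketch (the file name below is an arbitrary placeholder, not part
# of the package): round-trip one second of noise through save_audio/load_audio.
import numpy as np
_fs_demo = 44100
_noise = 0.1 * np.random.randn(_fs_demo)
save_audio(_noise, 'io_roundtrip_demo.wav', fs=_fs_demo)
_loaded = load_audio('io_roundtrip_demo.wav', fs=_fs_demo)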
def load_hrirs(fs, filename=None):
"""Convenience function to load 'HRTF.mat'.
The file contains ['hrir_l', 'hrir_r', 'fs', 'azi', 'colat'].
Parameters
----------
fs : int
fs(t).
filename : string, optional
HRTF.mat file or default set, or 'dummy' for debugging.
Returns
-------
HRIRs : sig.HRIRs instance
left : (g, h) numpy.ndarray
h(t) for grid position g.
right : (g, h) numpy.ndarray
h(t) for grid position g.
grid : (g, 2) pandas.dataframe
[azi: azimuth, colat: colatitude] for hrirs.
fs : int
fs(t).
"""
if filename == 'dummy':
azi, colat, _ = grids.gauss(15)
grid = pd.DataFrame({'azi': azi, 'colat': colat})
# Create diracs as dummy
hrir_l = np.zeros([grid.shape[0], 256])
hrir_l[:, 0] = np.ones(hrir_l.shape[0])
hrir_r = np.zeros_like(hrir_l)
hrir_r[:, 0] = np.ones(hrir_r.shape[0])
hrir_fs = fs
elif filename is None:
# default
if fs not in [44100, 48000, 96000]:
raise NotImplementedError('44100, 48000, 96000'
' default available.')
default_file = '../data/' + 'HRTF_default_' + str(fs) + '.mat'
current_file_dir = os.path.dirname(__file__)
filename = os.path.join(current_file_dir, default_file)
try:
mat = loadmat(filename)
except FileNotFoundError:
warn("No default hrirs. Generating them...")
get_default_hrirs()
mat = loadmat(filename)
else:
mat = loadmat(filename)
if not filename == 'dummy':
hrir_l = np.array(np.squeeze(mat['hrir_l']), dtype=float)
hrir_r = np.array(np.squeeze(mat['hrir_r']), dtype=float)
try:
hrir_fs = int(mat['fs'])
except KeyError:
hrir_fs = int(mat['SamplingRate'])
azi = np.array(np.squeeze(mat['azi']), dtype=float)
colat = np.array(np.squeeze(mat['colat']), dtype=float)
grid = | pd.DataFrame({'azi': azi, 'colat': colat}) | pandas.DataFrame |
import os.path
import pickle
import pytest
import pandas as pd
from ...source.tests.util import verify_datasource_interface
from .util import assert_items_equal
from intake import Catalog
TEST_CATALOG_PATH = os.path.join(os.path.dirname(__file__), 'catalog1.yml')
def test_info_describe(intake_server):
catalog = Catalog(intake_server)
assert_items_equal(list(catalog), ['use_example1', 'nested', 'entry1',
'entry1_part', 'remote_env',
'local_env', 'text', 'arr'])
info = catalog['entry1'].describe()
assert info == {
'container': 'dataframe',
'description': 'entry1 full',
'name': 'entry1',
'direct_access': 'forbid',
'user_parameters': []
}
info = catalog['entry1_part'].describe()
assert info['direct_access'] == 'allow'
def test_bad_url(intake_server):
bad_url = intake_server + '/nonsense_prefix'
with pytest.raises(Exception):
Catalog(bad_url)
def test_metadata(intake_server):
catalog = Catalog(intake_server)
assert hasattr(catalog, 'metadata')
assert catalog['metadata']['test'] is True
assert catalog.version == 1
def test_unknown_source(intake_server):
catalog = Catalog(intake_server)
with pytest.raises(Exception):
catalog['does_not_exist'].describe()
def test_remote_datasource_interface(intake_server):
catalog = Catalog(intake_server)
d = catalog['entry1'].get()
verify_datasource_interface(d)
def test_environment_evaluation(intake_server):
catalog = Catalog(intake_server)
import os
os.environ['INTAKE_TEST'] = 'client'
d = catalog['remote_env']
def test_read(intake_server):
catalog = Catalog(intake_server)
d = catalog['entry1'].get()
test_dir = os.path.dirname(__file__)
file1 = os.path.join(test_dir, 'entry1_1.csv')
file2 = os.path.join(test_dir, 'entry1_2.csv')
expected_df = pd.concat((pd.read_csv(file1), pd.read_csv(file2)))
meta = expected_df[:0]
info = d.discover()
assert info['datashape'] is None
assert info['dtype'] == {k: str(v) for k, v
in meta.dtypes.to_dict().items()}
assert info['npartitions'] == 2
assert info['shape'] == (None, 3) # Do not know CSV size ahead of time
md = d.metadata.copy()
md.pop('catalog_dir', None)
assert md == dict(foo='bar', bar=[1, 2, 3], cache=[])
df = d.read()
assert expected_df.equals(df)
def test_read_direct(intake_server):
catalog = Catalog(intake_server)
d = catalog['entry1_part'].get(part='2')
test_dir = os.path.dirname(__file__)
file2 = os.path.join(test_dir, 'entry1_2.csv')
expected_df = pd.read_csv(file2)
meta = expected_df[:0]
info = d.discover()
assert info['datashape'] is None
assert info['dtype'] == {k: str(v) for k, v
in meta.dtypes.to_dict().items()}
assert info['npartitions'] == 1
assert info['shape'] == (None, 3) # Do not know CSV size ahead of time
md = info['metadata'].copy()
md.pop('catalog_dir', None)
assert md == {'bar': [2, 4, 6], 'foo': 'baz', 'cache': []}
md = d.metadata.copy()
md.pop('catalog_dir', None)
assert md == dict(foo='baz', bar=[2, 4, 6], cache=[])
assert d.description == 'entry1 part'
df = d.read()
assert expected_df.equals(df)
def test_read_chunks(intake_server):
catalog = Catalog(intake_server)
d = catalog.entry1.get()
chunks = list(d.read_chunked())
assert len(chunks) == 2
test_dir = os.path.dirname(__file__)
file1 = os.path.join(test_dir, 'entry1_1.csv')
file2 = os.path.join(test_dir, 'entry1_2.csv')
expected_df = pd.concat((pd.read_csv(file1), pd.read_csv(file2)))
assert expected_df.equals(pd.concat(chunks))
def test_read_partition(intake_server):
catalog = Catalog(intake_server)
d = catalog.entry1.get()
p2 = d.read_partition(1)
p1 = d.read_partition(0)
test_dir = os.path.dirname(__file__)
file1 = os.path.join(test_dir, 'entry1_1.csv')
file2 = os.path.join(test_dir, 'entry1_2.csv')
assert | pd.read_csv(file1) | pandas.read_csv |
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
# GH 14545
data = """a,b
"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(df, expected)
round_trip = self.read_csv(StringIO(
expected.to_csv(index=False)), header=[0])
tm.assert_frame_equal(round_trip, expected)
data_multiline = """a,b
c,d
"""
df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
expected2 = DataFrame(columns=cols)
tm.assert_frame_equal(df2, expected2)
round_trip = self.read_csv(StringIO(
expected2.to_csv(index=False)), header=[0, 1])
tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
assert df.index.name is None
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = self.read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/parser/data/salaries.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@pytest.mark.slow
def test_file(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_nonexistent_path(self):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
path = '%s.csv' % tm.rands(10)
pytest.raises(compat.FileNotFoundError, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
assert result['D'].isna()[1:].all()
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
assert pd.isna(result.iloc[0, 29])
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
s.close()
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
assert len(result) == 50
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
assert len(result) == 50
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
assert got == expected
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
assert result['SEARCH_TERM'][2] == ('SLAGBORD, "Bergslagen", '
'IKEA:s 1700-tals serie')
tm.assert_index_equal(result.columns,
Index(['SEARCH_TERM', 'ACTUAL_URL']))
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
tm.assert_series_equal(result['Numbers'], expected['Numbers'])
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
assert type(df.a[0]) is np.float64
assert df.a.dtype == np.float
def test_warn_if_chunks_have_mismatched_type(self):
warning_type = False
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if self.engine == 'c' and self.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = self.read_csv(StringIO(data))
assert df.a.dtype == np.object
def test_integer_overflow_bug(self):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
assert result[0].dtype == np.float64
result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
assert result[0].dtype == np.float64
def test_catch_too_many_names(self):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
pytest.raises(ValueError, self.read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# see gh-3374, gh-6607
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# see gh-10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
assert len(result) == 2
# see gh-9735: this issue is C parser-specific (bug when
# parsing whitespace and characters at chunk boundary)
if self.engine == 'c':
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = self.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# see gh-10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# see gh-10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_float_parser(self):
# see gh-9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(self):
# see gh-12215
df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']),
('y', ['42e']), ('z', ['632E'])])
data = df.to_csv(index=False)
for prec in self.float_precision_choices:
df_roundtrip = self.read_csv(
StringIO(data), float_precision=prec)
tm.assert_frame_equal(df_roundtrip, df)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = self.read_csv(StringIO(data))
assert result['ID'].dtype == object
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
for conv in (np.int64, np.uint64):
pytest.raises(OverflowError, self.read_csv,
StringIO(data), converters={'ID': conv})
        # These numbers fall right inside the int64-uint64 range,
        # so they should be parsed as integers (not strings).
ui_max = np.iinfo(np.uint64).max
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min, ui_max]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([x])
tm.assert_frame_equal(result, expected)
# These numbers fall just outside the int64-uint64 range,
# so they should be parsed as string.
too_big = ui_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([str(x)])
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
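# Editor's sketch (not part of the original test suite): a minimal, hedged
# illustration of the overflow behaviour asserted in test_int64_overflow above,
# using the public pd.read_csv API directly.
def _example_int64_overflow_behaviour():
    import numpy as np
    import pandas as pd
    from io import StringIO

    in_range = np.iinfo(np.uint64).max          # largest representable integer
    too_big = in_range + 1                      # just outside the uint64 range

    parsed_ok = pd.read_csv(StringIO("ID\n%d" % in_range))
    parsed_str = pd.read_csv(StringIO("ID\n%d" % too_big))

    assert parsed_ok["ID"].dtype == np.uint64   # still numeric
    assert parsed_str["ID"].dtype == object     # falls back to string/object dtype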
'''
This code compares loc and iloc indexing on a pandas DataFrame
'''
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import pandas as pd
import timeit
df_test = | pd.DataFrame() | pandas.DataFrame |
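# Editor's sketch of the comparison this script sets up (assumed continuation,
# not part of the original file): time label-based .loc access against
# position-based .iloc access on the same frame.
def _example_loc_vs_iloc():
    import numpy as np
    frame = pd.DataFrame(np.random.rand(100000, 4), columns=list('abcd'))
    loc_time = timeit.timeit(lambda: frame.loc[50000, 'b'], number=10000)
    iloc_time = timeit.timeit(lambda: frame.iloc[50000, 1], number=10000)
    print('loc: %.4fs  iloc: %.4fs' % (loc_time, iloc_time))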
import numpy as np
import pandas as pd
import os
def mask_step(x, step):
"""
Create a mask to only contain the step-th element starting from the first element. Used to downsample
"""
mask = np.zeros_like(x)
mask[::step] = 1
return mask.astype(bool)
def downsample(df, step):
"""
Downsample data by the given step. Example, SDD is recorded in 30 fps, with step=30, the fps of the resulting
df will become 1 fps. With step=12 the result will be 2.5 fps. It will do so individually for each unique
pedestrian (metaId)
:param df: pandas DataFrame - necessary to have column 'metaId'
:param step: int - step size, similar to slicing-step param as in array[start:end:step]
:return: pd.df - downsampled
"""
mask = df.groupby(['metaId'])['metaId'].transform(mask_step, step=step)
return df[mask]
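# Editor's sketch (assumed data, not part of the original file): SDD is recorded
# at 30 fps, so step=30 keeps one frame per second for each pedestrian.
def _example_downsample():
    raw = pd.DataFrame({
        'metaId': [0] * 60,
        'frame': list(range(60)),
        'x': np.linspace(0.0, 10.0, 60),
        'y': np.linspace(0.0, 5.0, 60),
    })
    one_fps = downsample(raw, step=30)   # keeps frames 0 and 30 -> 2 rows
    return one_fps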
def filter_short_trajectories(df, threshold):
"""
Filter trajectories that are shorter in timesteps than the threshold
:param df: pandas df with columns=['x', 'y', 'frame', 'trackId', 'sceneId', 'metaId']
:param threshold: int - number of timesteps as threshold, only trajectories over threshold are kept
:return: pd.df with trajectory length over threshold
"""
len_per_id = df.groupby(by='metaId', as_index=False).count() # sequence-length for each unique pedestrian
idx_over_thres = len_per_id[len_per_id['frame'] >= threshold] # rows which are above threshold
idx_over_thres = idx_over_thres['metaId'].unique() # only get metaIdx with sequence-length longer than threshold
df = df[df['metaId'].isin(idx_over_thres)] # filter df to only contain long trajectories
return df
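# Editor's sketch (assumed data): with e.g. obs_len=8 and pred_len=12 a usable
# sample needs 20 timesteps, so shorter tracks are dropped before windowing.
def _example_filter_short_trajectories():
    demo = pd.DataFrame({
        'metaId': [0] * 25 + [1] * 5,
        'frame': list(range(25)) + list(range(5)),
        'x': 0.0, 'y': 0.0,
    })
    kept = filter_short_trajectories(demo, threshold=20)
    return kept   # only metaId 0 survives; metaId 1 has just 5 timesteps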
def groupby_sliding_window(x, window_size, stride):
x_len = len(x)
n_chunk = (x_len - window_size) // stride + 1
idx = []
metaId = []
for i in range(n_chunk):
idx += list(range(i * stride, i * stride + window_size))
metaId += ['{}_{}'.format(x.metaId.unique()[0], i)] * window_size
# temp = x.iloc()[(i * stride):(i * stride + window_size)]
# temp['new_metaId'] = '{}_{}'.format(x.metaId.unique()[0], i)
# df = df.append(temp, ignore_index=True)
df = x.iloc()[idx]
df['newMetaId'] = metaId
return df
def sliding_window(df, window_size, stride):
"""
Assumes downsampled df, chunks trajectories into chunks of length window_size. When stride < window_size then
chunked trajectories are overlapping
:param df: df
:param window_size: sequence-length of one trajectory, mostly obs_len + pred_len
:param stride: timesteps to move from one trajectory to the next one
:return: df with chunked trajectories
"""
gb = df.groupby(['metaId'], as_index=False)
df = gb.apply(groupby_sliding_window, window_size=window_size, stride=stride)
df['metaId'] = pd.factorize(df['newMetaId'], sort=False)[0]
df = df.drop(columns='newMetaId')
df = df.reset_index(drop=True)
return df
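# Editor's sketch (assumed data): a 50-step trajectory chunked into 20-step
# windows; stride=20 gives non-overlapping chunks, stride=10 gives overlapping ones.
def _example_sliding_window():
    traj = pd.DataFrame({'metaId': [0] * 50, 'frame': list(range(50)),
                         'x': 0.0, 'y': 0.0})
    no_overlap = sliding_window(traj, window_size=20, stride=20)   # 2 chunks
    overlapping = sliding_window(traj, window_size=20, stride=10)  # 4 chunks
    return no_overlap, overlapping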
def split_at_fragment_lambda(x, frag_idx, gb_frag):
""" Used only for split_fragmented() """
metaId = x.metaId.iloc()[0]
counter = 0
if metaId in frag_idx:
split_idx = gb_frag.groups[metaId]
for split_id in split_idx:
x.loc[split_id:, 'newMetaId'] = '{}_{}'.format(metaId, counter)
counter += 1
return x
def split_fragmented(df):
"""
Split trajectories when fragmented (defined as frame_{t+1} - frame_{t} > 1)
Formally, this is done by changing the metaId at the fragmented frame and below
:param df: DataFrame containing trajectories
:return: df: DataFrame containing trajectories without fragments
"""
gb = df.groupby('metaId', as_index=False)
# calculate frame_{t+1} - frame_{t} and fill NaN which occurs for the first frame of each track
df['frame_diff'] = gb['frame'].diff().fillna(value=1.0).to_numpy()
fragmented = df[df['frame_diff'] != 1.0] # df containing all the first frames of fragmentation
gb_frag = fragmented.groupby('metaId') # helper for gb.apply
frag_idx = fragmented.metaId.unique() # helper for gb.apply
df['newMetaId'] = df['metaId'] # temporary new metaId
df = gb.apply(split_at_fragment_lambda, frag_idx, gb_frag)
df['metaId'] = pd.factorize(df['newMetaId'], sort=False)[0]
df = df.drop(columns='newMetaId')
return df
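# Editor's sketch (assumed data): pedestrian 0 disappears between frame 2 and
# frame 10, so its track should be split into two metaIds at the gap.
def _example_split_fragmented():
    frag = pd.DataFrame({'metaId': [0] * 6, 'trackId': [0] * 6,
                         'frame': [0, 1, 2, 10, 11, 12],
                         'x': 0.0, 'y': 0.0})
    return split_fragmented(frag)   # expect metaId 0 (frames 0-2) and 1 (frames 10-12)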
def load_inD(path='inD-dataset-v1.0/data', scenes=[1], recordings=None):
'''
    Loads data from the inD dataset and performs the following preprocessing:
    -filter out unnecessary columns
    -filter out non-pedestrian tracks
    -make a new unique ID (column 'metaId'), since the original dataset resets ids for each scene
    -add the scene name as a column for visualization
    -output has columns=['trackId', 'frame', 'x', 'y', 'sceneId', 'metaId']
    :param path: str - path to folder, default is 'inD-dataset-v1.0/data'
:param scenes: list of integers - scenes to load
:param recordings: list of strings - alternative to scenes, load specified recordings instead, overwrites scenes
:return: DataFrame containing all trajectories from split
'''
scene2rec = {1: ['00', '01', '02', '03', '04', '05', '06'],
2: ['07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17'],
3: ['18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29'],
4: ['30', '31', '32']}
rec_to_load = []
for scene in scenes:
rec_to_load.extend(scene2rec[scene])
if recordings is not None:
rec_to_load = recordings
data = []
for rec in rec_to_load:
# load csv
track = pd.read_csv(os.path.join(path, '{}_tracks.csv'.format(rec)))
track = track.drop(columns=['trackLifetime', 'heading', 'width', 'length', 'xVelocity', 'yVelocity',
'xAcceleration', 'yAcceleration', 'lonVelocity', 'latVelocity',
'lonAcceleration', 'latAcceleration'])
track_meta = pd.read_csv(os.path.join(path, '{}_tracksMeta.csv'.format(rec)))
# Filter non-pedestrians
pedestrians = track_meta[track_meta['class'] == 'pedestrian']
track = track[track['trackId'].isin(pedestrians['trackId'])]
track['rec&trackId'] = [str(recId) + '_' + str(trackId).zfill(6) for recId, trackId in
zip(track.recordingId, track.trackId)]
track['sceneId'] = rec
track['yCenter'] = -track['yCenter']
        # Filter all trajectories outside the scene frame, i.e. negative values
track = track[(track['yCenter'] >= 0) & (track['xCenter'] >= 0)]
data.append(track)
data = | pd.concat(data, ignore_index=True) | pandas.concat |
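# Editor's sketch of the full preprocessing pipeline built from the helpers above.
# Assumptions: load_inD() ends by returning the concatenated frame with the columns
# its docstring describes, and inD is recorded at 25 fps; the path, scenes and
# window parameters are illustrative only.
def _example_inD_pipeline():
    df = load_inD(path='inD-dataset-v1.0/data', scenes=[1])
    df = split_fragmented(df)
    df = downsample(df, step=25)                      # assumed 25 fps -> 1 fps
    df = filter_short_trajectories(df, threshold=20)  # e.g. obs_len 8 + pred_len 12
    df = sliding_window(df, window_size=20, stride=20)
    return df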
from scipy.spatial.distance import cdist
from matplotlib import pyplot as plt
from joblib import dump, load
import numpy as np
import pandas as pd
from sklearn.cluster import DBSCAN
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.xmeans import xmeans
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
from sklearn import metrics
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler
from random import random
import csv
import config
import os
import time
from beacon import *
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# used to reproduce the street slots
lenght_via = config.GERAL["lenght_via"]
interval = config.GERAL["interval"]
# defines the reduction method
send_method = config.GERAL["Configuracao"]
# local path
RemotePathFiles = config.CONSTANTES["RemotePathFiles"]
# summaries
MEDIA_LOS=0.0
MEDIANA_LOS=0.0
# clusters
MEDIA_CLUSTER=0.0
MEDIA_RUIDO=0.0
COUNT_CLUSTERES=0
COUNT_RUIDO=0
COUNT_DATA = 0
# support for the baseline types
BASELINE1TO2=0
BASELINETHRESHOLD=5.0
# final output file
SIZE_FINAL_FILE=0.0
# supporting datasets
DF_PREVIOUS= | pd.DataFrame() | pandas.DataFrame |
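# Editor's sketch (parameter values are assumptions, not taken from config): one
# way the cluster/noise summary globals above could be computed with the imported
# scikit-learn DBSCAN.
def _example_dbscan_summary(points, eps=2.0, min_samples=3):
    labels = DBSCAN(eps=eps, min_samples=min_samples).fit(points).labels_
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)  # label -1 marks noise
    n_noise = int(np.sum(labels == -1))
    return n_clusters, n_noise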
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 5 15:46:16 2019
@author: tungutokyo
"""
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None
| pd.set_option("display.max_columns", 60) | pandas.set_option |
# python3
import argparse
from Bio import SeqIO
from Bio.SeqUtils.ProtParam import ProteinAnalysis
import pandas as pd
# imput parameters
ap = argparse.ArgumentParser(description="outputs the id, pI, charge and molecular weight of each protein")
ap.add_argument("-in", "--input", required=True, help="input fasta file")
ap.add_argument("-txt", "--txt", required=False, help=" 1-column txt file with pH values, to calculate the protein charge")
ap.add_argument("-pH", "--pH", type=float, default=7.0, required=False,help="pH to calculate the protein charge(default is 7.0)")
ap.add_argument("-pro", "--program", type=int, default=1, required=False, help="program to select 1) 1 pH value , 2) many pH values 1 per protein, 3) many pH values for all proteins. Default is 1")
ap.add_argument("-out", "--output", required=True, help="output txt file with id, pI, charge, molecular weight and pH columns")
args = vars(ap.parse_args())
# main
headers = []
seqs = []
pI = []
charge = []
mw = [] # setup empty lists
# choose program
# same pH value for all proteins
if args['program'] == 1:
for record in SeqIO.parse(args['input'], "fasta"):
headers.append(record.id)
prot = ProteinAnalysis(str(record.seq))
pI.append(round(prot.isoelectric_point(), 2))
mw.append(round(prot.molecular_weight(), 2))
charge.append(round(prot.charge_at_pH(args['pH']), 2))
# create data frame
df = pd.DataFrame()
df['id'] = headers
df['pI'] = pI
df['charge'] = charge
df['mw'] = mw
df['pH'] = args['pH']
# export
with open(args['output'], 'a') as f:
f.write(
df.to_csv(header = True, index = False, sep = '\t', doublequote= False, line_terminator= '\n')
)
# 1 pH value for each protein
elif args['program'] == 2:
for record in SeqIO.parse(args['input'], "fasta"):
headers.append(record.id)
seqs.append(record.seq)
# import txt file with pH values
with open(args['txt'], 'r') as f:
ph_values = f.readlines()
ph_values = [x.strip() for x in ph_values]
# calculate the properties using a pair of the above 2 lists
for (a, b) in zip(seqs, ph_values):
prot = ProteinAnalysis(str(a))
pI.append(round(prot.isoelectric_point(), 2))
mw.append(round(prot.molecular_weight(), 2))
charge.append(round(prot.charge_at_pH(float(b)), 2))
# create data frame
df = pd.DataFrame()
df['id'] = headers
df['pI'] = pI
df['charge'] = charge
df['mw'] = mw
df['pH'] = ph_values
# export
with open(args['output'], 'a') as f:
f.write(
df.to_csv(header = True, index = False, sep = '\t', doublequote= False, line_terminator= '\n')
)
# many pH values for each protein
else:
# setup empty list
physioprot = []
# import txt file with pH values
with open(args['txt'], 'r') as f:
ph_values = f.readlines()
ph_values = [x.strip() for x in ph_values]
# iterate for each pH value in each fasta record
for i in ph_values:
for record in SeqIO.parse(args['input'], "fasta"):
headers.append(record.id)
prot = ProteinAnalysis(str(record.seq))
pI.append(round(prot.isoelectric_point(), 2))
mw.append(round(prot.molecular_weight(), 2))
charge.append(round(prot.charge_at_pH(float(i)), 2))
# create data frame
df = | pd.DataFrame() | pandas.DataFrame |
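# Editor's sketch of the underlying Biopython calls this script relies on; the
# sequence is a made-up example, not data shipped with the script.
def _example_protein_properties():
    demo = ProteinAnalysis("MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ")
    return (round(demo.isoelectric_point(), 2),   # pI
            round(demo.molecular_weight(), 2),    # molecular weight in Da
            round(demo.charge_at_pH(7.0), 2))     # net charge at pH 7.0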