| prompt | completion | api |
|---|---|---|
| stringlengths 19 to 1.03M | stringlengths 4 to 2.12k | stringlengths 8 to 90 |
#This PSST file, originally due to <NAME>, has been modified by <NAME> to return the sorted list of LMPs.
import pandas as pd
import click
class PSSTResults(object):
def __init__(self, model):
self._model = model
self._maximum_hours = 24
@property
def production_cost(self):
m = self._model
st = 'SecondStage'
return sum([m.ProductionCost[g, t].value for t in m.GenerationTimeInStage[st] if t < self._maximum_hours for g in m.Generators])
@property
def commitment_cost(self):
m = self._model
st = 'FirstStage'
return sum([m.StartupCost[g, t].value + m.ShutdownCost[g, t].value for g in m.Generators for t in m.CommitmentTimeInStage[st] if t < self._maximum_hours])
@property
def noload_cost(self):
m = self._model
st = 'FirstStage'
return sum([sum([m.UnitOn[g, t].value for t in m.CommitmentTimeInStage[st] if t < self._maximum_hours]) * m.MinimumProductionCost[g].value * m.TimePeriodLength.value for g in m.Generators])
@property
def unit_commitment(self):
df = self._get('UnitOn', self._model)
return df.clip_lower(0)
@property
def line_power(self):
return self._get('LinePower', self._model)
@property
def angles(self):
return self._get('Angle', self._model)
@property
def maximum_power_available(self):
return self._get('MaximumPowerAvailable', self._model)
@property
def minimum_power_available(self):
return self._get('MinimumPowerAvailable', self._model)
@property
def power_generated(self):
return self._get('PowerGenerated', self._model)
@property
def slack_variables(self):
return self._get('LoadGenerateMismatch', self._model)
@property
def regulating_reserve_up_available(self):
return self._get('RegulatingReserveUpAvailable', self._model)
@property
def maximum_power_output(self):
return self._get('MaximumPowerOutput', self._model, self._model.Generators)
@property
def maximum_line_power(self):
return self._get('ThermalLimit', self._model, self._model.TransmissionLines)
@property
def lmp(self):
return self._get('PowerBalance', self._model, dual=True)
@property
def reserve_down_dual(self):
return self._get('EnforceReserveDownRequirements', self._model, dual=True)
@property
def reserve_up_dual(self):
return self._get('EnforceReserveUpRequirements', self._model, dual=True)
@property
def hot_start(self):
return self._get('HotStart', self._model)
@property
def startup_cost(self):
return self._get('StartupCost', self._model)
@staticmethod
def _get(attribute, model, set1=None, set2=None, dual=False):
_dict = dict()
if set1 is not None and set2 is None:
for s1 in set1:
_dict[s1] = getattr(model, attribute)[s1]
return pd.Series(_dict)
else:
if set1 is None and set2 is None:
set1 = set()
set2 = set()
if attribute == 'PowerBalance':
index = getattr(model, attribute + '_index')
for i, j in index:
set1.add(i)
set2.add(j)
elif attribute == 'EnforceReserveDownRequirements':
index = getattr(model, attribute)
_dict = list()
for i in index:
_dict.append(model.dual.get(getattr(model,attribute)[i]))
return pd.DataFrame(_dict)
elif attribute == 'EnforceReserveUpRequirements':
index = getattr(model, attribute)
_dict = list()
for i in index:
_dict.append(model.dual.get(getattr(model,attribute)[i]))
return pd.DataFrame(_dict)
for s1 in set1:
_dict[s1] = list()
for s2 in set2:
if dual is True:
_dict[s1].append(model.dual.get(getattr(model, attribute)[s1, s2]))
else:
_dict[s1].append(getattr(model, attribute)[s1, s2].value)
pd_unsorted = pd.DataFrame(_dict)
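# --- Hedged sketch (not part of the original PSST file) ---------------------------
# The snippet above is truncated right after `pd_unsorted` is built. Given the header
# comment about returning the *sorted* list of LMPs, one plausible continuation is to
# sort both axes before returning; the exact sort orientation is an assumption.
def _sort_lmp_frame(pd_unsorted):
    # Buses as columns, time periods as rows: sort both for a stable ordering.
    return pd_unsorted.reindex(sorted(pd_unsorted.columns), axis=1).sort_index()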
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core import ops
from pandas.errors import NullFrequencyError
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = pd.timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with datetime-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(pd.timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
# ------------------------------------------------------------------
# Operations with timedelta-like others (including DateOffsets)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - NaT
tm.assert_equal(res, expected)
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly returns "
"m8[ns] instead of f8",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = td1 // scalar_td
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly casts to f8",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_rfloordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = scalar_td // td1
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns m8[ns] dtype "
"instead of f8",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_rfloordiv_tdscalar_explicit(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with timedelta-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="__mul__ op treats "
"timedelta other as i8; "
"rmul OK",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_mul_tdscalar_invalid(self, box, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with tm.assert_raises_regex(TypeError, pattern):
td1 * scalar_td
with tm.assert_raises_regex(TypeError, pattern):
scalar_td * td1
# ------------------------------------------------------------------
# Operations with numeric others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object-dtype",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box, one, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['-59 Days', '-59 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(['118 Days', '118 Days', 'NaT'],
dtype='timedelta64[ns]')
expected = tm.box_expected(expected, box)
result = tdser * (2 * one)
tm.assert_equal(result, expected)
result = (2 * one) * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object-dtype",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('two', [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box, two, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
result = tdser / two
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])])
def test_td64arr_mul_numeric_array(self, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['1180 Days', '1770 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
# TODO: Make this up-casting more systematic?
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
result = tdser * vector
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
def test_td64arr_rmul_numeric_array(self, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['1180 Days', '1770 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
result = vector * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])])
def test_td64arr_div_numeric_array(self, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['2.95D', '1D 23H 12m', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
result = tdser / vector
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
vector / tdser
# TODO: Should we be parametrizing over types for `ser` too?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_mul_int_series(self, box, names):
# GH#19042 test for correct name attachment
tdi = TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
name=names[0])
ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])
expected = Series(['0days', '1day', '4days', '9days', '16days'],
dtype='timedelta64[ns]',
name=names[2])
tdi = tm.box_expected(tdi, box)
box = Series if (box is pd.Index and type(ser) is Series) else box
expected = tm.box_expected(expected, box)
result = ser * tdi
tm.assert_equal(result, expected)
# The direct operation tdi * ser still needs to be fixed.
result = ser.__rmul__(tdi)
tm.assert_equal(result, expected)
# TODO: Should we be parametrizing over types for `ser` too?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pd.DataFrame
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_float_series_rdiv_td64arr(self, box, names):
# GH#19042 test for correct name attachment
# TODO: the direct operation TimedeltaIndex / Series still
# needs to be fixed.
tdi = TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
name=names[0])
ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1])
expected = Series([tdi[n] / ser[n] for n in range(len(ser))],
dtype='timedelta64[ns]',
name=names[2])
tdi = tm.box_expected(tdi, box)
box = Series if (box is pd.Index and type(ser) is Series) else box
expected = tm.box_expected(expected, box)
result = ser.__rdiv__(tdi)
if box is pd.DataFrame:
# TODO: Should we skip this case sooner or test something else?
assert result is NotImplemented
else:
tm.assert_equal(result, expected)
class TestTimedeltaArraylikeInvalidArithmeticOps(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="raises ValueError "
"instead of TypeError",
strict=True))
])
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_pow_invalid(self, scalar_td, box):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box)
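# Hedged sketch (assumption, not the pandas source): the test above is cut off at the
# box_expected call. Mirroring test_td64arr_mul_tdscalar_invalid earlier in this file,
# the remainder presumably asserts that both pow orderings raise TypeError; the message
# pattern below is assumed to match the one used for the mul case.
def _check_td64_pow_raises(td1, scalar_td):
    pattern = 'operate|unsupported|cannot|not supported'
    with tm.assert_raises_regex(TypeError, pattern):
        scalar_td ** td1
    with tm.assert_raises_regex(TypeError, pattern):
        td1 ** scalar_td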
# ------------------------------------------------------------------------------#
# (1)
# Write at least two functions that handle the reading, processing, and visualization
# of a time series of transactions for one location (passed as an argument),
# using the sum, mean, or median of the transactions on each day.
import pandas as pd
import pylab as pl
import seaborn as sns
import matplotlib.pyplot as plt
def visualize_mean_transactions_at_location_over_time(df, location):
grouped_df = df.query(f'location == \"{location}\"')
mean_price = grouped_df.groupby('date')['price'].mean()
mean_price.plot()
plt.show()
def visualize_sum_of_transactions_at_location_over_time(df, location):
grouped_df = df.query(f'location == \"{location}\"')
summed_price = grouped_df.groupby('date')['price'].sum()
summed_price.plot()
plt.show()
# reading a csv file (credit card data from the VAST Mini Challenge 2021)
# test your function(s) for <Coffee Cameleon> and <Brew've Been Served>
# plot("Coffee Cameleon")
# plot("Brew've Been Served")
def test_visualize_above():
cc_df = pd.read_csv("data/cc_data_1.csv")
visualize_mean_transactions_at_location_over_time(cc_df, 'Coffee Cameleon')
visualize_sum_of_transactions_at_location_over_time(cc_df, 'Coffee Cameleon')
visualize_mean_transactions_at_location_over_time(cc_df, 'Brew\'ve Been Served')
visualize_sum_of_transactions_at_location_over_time(cc_df, 'Brew\'ve Been Served')
# test_visualize_above()
# (2) - optional
# Create a heatmap (https://seaborn.pydata.org/generated/seaborn.heatmap.html)
# using seaborn for every location (y) and every date (x) and the
# sum of the price on the specific day (z).
# Hints: 1 - first build a list of dictionaries and then convert it to
# a DataFrame to visualize
# 2 - use iterrows in this way to iterate through a df:
# for index, row in df.iterrows():
# row.location
def heatmap_location_date(df):
data_by_location = {}
locs = df['location'].unique()
for loc in locs:
ta_in_loc = df.query(f'location == \"{loc}\"')
data_by_location[loc] = ta_in_loc.groupby('date')['price'].sum()
df_by_location = pd.DataFrame(data_by_location)
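# Hedged sketch (not part of the original solution): heatmap_location_date above is
# truncated after df_by_location is built. A plausible way to finish it, following the
# exercise hint to use seaborn, is shown below; filling missing dates with 0 is an
# assumption.
def plot_location_date_heatmap(df_by_location):
    # Transpose so locations run along y and dates along x, as the exercise asks.
    sns.heatmap(df_by_location.T.fillna(0))
    plt.show()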
import os
import re
import string
from collections import Counter
import numpy as np
import pandas as pd
import spacy
from sklearn.linear_model import ElasticNet
from spacy.cli import download
try:
nlp = spacy.load("en")
except Exception:
download("en")
nlp = spacy.load("en")
class CleanDataframe:
def remove_names(self, text):
"""
Parameters
--------
text: str
Returns
--------
cleaned_text: str
"""
all_names = pd.read_pickle(
os.path.join(os.path.dirname(__file__), "data/all_names")
)
cleaned_text = text
for _, row in all_names.iterrows():
# Matches name as long as it is not followed by lowercase characters
# Removing names that are a part of another word
cleaned_text = re.sub(row["name"] + "(?![a-z])", " ", cleaned_text)
return cleaned_text
def remove_pii(self, text):
"""
Remove common patterns of personally identifiable information (PII)
Parameters
--------
text: str
Returns
--------
cleaned_text: str
"""
regex_dict = {
"credit_card_numbers": r"(?:\d[ -]*?){13,16}",
"phone_numbers": r"[\+]?[\d]{0,3}[\s]?[\(]?\d{3}[\)]?[\s\-\.]{0,1}\d{3}[\s\-\.]{0,1}\d{4}",
"social_security_numbers": r"(\d{3}[-\s]?\d{2}[-\s]?\d{4})",
"ip_addresses": r"((?:[0-9]{1,3}\.){3}[0-9]{1,3})",
"email_addresses": r"([a-zA-Z0-9_\.-]+)@([1-9a-zA-Z\.-]+)\.([a-zA-Z\.]{2,6})",
"urls": r"((https?:\/\/)?(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,255}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*))",
}
cleaned_text = text
for this_pii_item in regex_dict:
cleaned_text = re.sub(
regex_dict[this_pii_item],
"",
cleaned_text,
)
return cleaned_text
def remove_links(self, text):
"""
Parameters
--------
text: str
Returns
--------
cleaned_text: str
"""
cleaned_text = text
links_found = re.findall(
r"(https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9]\.[^\s]{2,})",
cleaned_text,
)
for link in links_found:
cleaned_text = cleaned_text.replace(link, "")
return cleaned_text
def lematize(self, text):
"""
Parameters
--------
text: str
Returns
--------
list of spacy tokens
"""
spacy_text = nlp(text)
return [token.lemma_ for token in spacy_text if not token.is_space]
def remove_email_greetings_signatures(self, text):
"""
To obtain only the main text of an email, this method removes greetings, sign-offs,
and signatures by dropping sentences in which fewer than 5% of the tokens are verbs. Links are not replaced.
Inspiration from: https://github.com/mynameisvinn/EmailParser
Parameters
--------
text: str
Returns
--------
text: str
"""
sentences = text.strip().split("\n")
non_sentences = []
for sentence in sentences:
spacy_text = nlp(sentence.strip())
verb_count = np.sum(
[
(
token.pos_ == "VERB"
or token.pos_ == "AUX"
or token.pos_ == "ROOT"
or token.pos_ == "pcomp"
)
for token in spacy_text
]
)
try:
prob = float(verb_count) / len(spacy_text)
except Exception:
prob = 1.0
# If 5% or less of a sentence is verbs, it's probably not a real sentence
if prob <= 0.05:
non_sentences.append(sentence)
for non_sentence in non_sentences:
# Don't replace links
if "http" not in non_sentence and non_sentence not in string.punctuation:
text = text.replace(non_sentence, "")
return text
def clean_column_names(self, df):
"""
Rename all columns to use underscores to reference columns without bracket formatting
Parameters
--------
df: DataFrame
Returns
--------
df: DataFrame
"""
df.rename(
columns=lambda x: str(x).strip().replace(" ", "_").lower(), inplace=True
)
return df
def remove_duplicate_columns(self, df):
"""
Remove columns with the same name
Parameters
--------
df: DataFrame
Returns
--------
df: DataFrame
"""
df = df.loc[:, ~df.columns.duplicated()]
return df
def fix_col_data_type(self, df, col, desired_dt):
"""
Change column datatype using the best method for each type.
Parameters
--------
df: DataFrame
col: str
Column to change the dtype for
desired_dt: str
{'float', 'int', 'datetime', 'str'}
Returns
--------
df: DataFrame
"""
if desired_dt in ("float", "int"):
df[col] = pd.to_numeric(df[col], errors="coerce")
elif desired_dt == "datetime":
df[col] = pd.to_datetime(df[col], errors="coerce")
elif desired_dt == "str":
df[col] = df[col].astype(str)
return df
def compress_df(self, df):
"""
Compresses each dataframe column as much as possible depending on type and values.
Parameters
--------
df: DataFrame
Returns
--------
df: DataFrame
"""
for col in df.columns:
if df[col].dtype == "O":
unique_vals = df[col].nunique()
count_vals = df[col].shape[0]
if unique_vals < (count_vals * 0.5):
df[col] = df[col].astype("category")
elif df[col].dtype == "int64":
df[col] = pd.to_numeric(df[col], downcast="unsigned")
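# Hedged sketch (assumption): compress_df above is cut off inside the int64 branch. A
# plausible completion of the numeric downcasting, mirroring the "unsigned" call already
# shown, is:
def _compress_numeric_column(df, col):
    # Non-negative ints go to the smallest unsigned type, signed ints to the smallest
    # signed type, and float64 columns are downcast toward float32 where possible.
    if df[col].dtype == "int64":
        if (df[col] >= 0).all():
            df[col] = pd.to_numeric(df[col], downcast="unsigned")
        else:
            df[col] = pd.to_numeric(df[col], downcast="integer")
    elif df[col].dtype == "float64":
        df[col] = pd.to_numeric(df[col], downcast="float")
    return df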
# -*- coding: utf-8 -*-
"""
These test the private routines in types/cast.py
"""
import pytest
from datetime import datetime, timedelta, date
import numpy as np
import pandas as pd
from pandas import (Timedelta, Timestamp, DatetimeIndex,
DataFrame, NaT, Period, Series)
from pandas.core.dtypes.cast import (
maybe_downcast_to_dtype,
maybe_convert_objects,
cast_scalar_to_array,
infer_dtype_from_scalar,
infer_dtype_from_array,
maybe_convert_string_to_object,
maybe_convert_scalar,
find_common_type)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
PeriodDtype)
from pandas.core.dtypes.common import (
is_dtype_equal)
from pandas.util import testing as tm
class TestMaybeDowncast(object):
def test_downcast_conv(self):
# test downcasting
arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
assert (np.array_equal(result, arr))
arr = np.array([8., 8., 8., 8., 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
arr = np.array([8., 8., 8., 8., 9.0000000000005])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
# GH16875 coercing of bools
ser = Series([True, True, False])
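# Hedged sketch (assumption, not necessarily the actual pandas test body): the GH16875
# check above is truncated at the Series construction. The point of that issue is that
# boolean data must not be coerced by the downcast, so the remainder presumably asserts
# the Series comes back unchanged, e.g.:
def _check_bools_not_coerced(ser):
    result = maybe_downcast_to_dtype(ser, np.dtype(np.int64))
    tm.assert_series_equal(result, ser)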
import json
from pathlib import Path
import pandas as pd
from os import path
from collections import Counter
from configs import Level, LEVEL_MAP, PROCESS_METRICS_FIELDS
from db.QueryBuilder import get_level_refactorings
from refactoring_statistics.plot_utils import box_plot_seaborn
from refactoring_statistics.query_utils import retrieve_columns, get_metrics_stable_level
from utils.log import log_init, log_close, log
import datetime
import time
INPUT_DIRECTORY = "results/predictions/reproduction/"
SAVE_DIRECTORY = "results/Evaluation/reproduction/"
# metrics
CLASS_METRICS_Fields = ["classCbo",
# "classLcom", to large for plotting
"classLCC",
"classTCC",
"classRfc",
"classWmc"]
CLASS_ATTRIBUTES_QTY_Fields = ["classUniqueWordsQty",
"classNumberOfMethods",
"classStringLiteralsQty",
"classNumberOfPublicFields",
"classVariablesQty",
# "classLoc" to large for plotting
]
ALL_METRICS = CLASS_METRICS_Fields + CLASS_ATTRIBUTES_QTY_Fields + PROCESS_METRICS_FIELDS
# import all json files in the given directory and return them as pd dataframe
def import_evaluation(dir_path: str):
path_list = Path(dir_path).glob('**/*.json')
evaluation_data = pd.DataFrame()
prediction_data = pd.DataFrame()
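# Hedged sketch (assumption): import_evaluation above is truncated right after the two
# empty frames are created. Given the comment ("import all json files ... return them as
# pd dataframe"), a plausible body reads each JSON file and concatenates the results;
# a record-oriented JSON layout is assumed here.
def _load_json_results(dir_path: str) -> pd.DataFrame:
    frames = []
    for json_path in Path(dir_path).glob('**/*.json'):
        with open(json_path, 'r') as f:
            frames.append(pd.json_normalize(json.load(f)))
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()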
import json
import pandas as pd
import datetime
import time
import math
import pprint
import gspread
from oauth2client.service_account import ServiceAccountCredentials
# Load projects_info
with open('42_projects_info.json', 'r') as file:
js0 = json.loads(file.read())
# Manually list all exception cases
exceptions = {'id': [118, 833, 48, 791, 62, 727, 394, 742, 370], 'score_type': ['sum', 'sum', 'sum', 'sum', 'sum', 'sum', 'sum', 'sum', 'sum']}
projects_42 = {}
# Filter only projects in Benguerir and Khourbiga
for item in js0:
if any((el['id'] == 21 or el['id'] == 16) for el in item['campus']):
projects_42[item['id']] = [item['slug']]
#pprint.pprint(projects_42)
# Load user_info
with open('users_info.txt', 'r') as file:
js0 = json.loads(file.read())
# Make a dataframe from initial json
col_lst = ['login', 'project', 'score', 'exp_score']
df0 = pd.DataFrame(columns=col_lst)
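# Hedged sketch (assumption): the script above is truncated right after df0 is created.
# One plausible next step is to walk each user's project entries and append one row per
# (login, project) pair; the field names inside users_info ('projects_users',
# 'final_mark') are assumptions about the 42 API payload, not taken from the original.
rows = []
for user in js0:
    for proj in user.get('projects_users', []):
        pid = proj['project']['id']
        if pid in projects_42:
            rows.append({'login': user.get('login'),
                         'project': projects_42[pid][0],
                         'score': proj.get('final_mark'),
                         'exp_score': None})
df0 = pd.DataFrame(rows, columns=col_lst)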
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines trial results."""
import abc
import collections
import io
from typing import (
Any,
Callable,
Dict,
Iterable,
Mapping,
Optional,
Sequence,
TYPE_CHECKING,
Tuple,
TypeVar,
Union,
cast,
)
import numpy as np
import pandas as pd
from cirq import value, ops
from cirq._compat import deprecated, proper_repr, _warn_or_error
from cirq.study import resolver
if TYPE_CHECKING:
import cirq
T = TypeVar('T')
TMeasurementKey = Union[str, 'cirq.Qid', Iterable['cirq.Qid']]
def _tuple_of_big_endian_int(bit_groups: Iterable[Any]) -> Tuple[int, ...]:
"""Returns the big-endian integers specified by groups of bits.
Args:
bit_groups: Groups of descending bits, each specifying a big endian
integer with the 1s bit at the end.
Returns:
A tuple containing the integer for each group.
"""
return tuple(value.big_endian_bits_to_int(bits) for bits in bit_groups)
def _bitstring(vals: Iterable[Any]) -> str:
str_list = [str(int(v)) for v in vals]
separator = '' if all(len(s) == 1 for s in str_list) else ' '
return separator.join(str_list)
def _keyed_repeated_bitstrings(vals: Mapping[str, np.ndarray]) -> str:
keyed_bitstrings = []
for key in sorted(vals.keys()):
reps = vals[key]
n = 0 if len(reps) == 0 else len(reps[0])
all_bits = ', '.join(_bitstring(reps[:, i]) for i in range(n))
keyed_bitstrings.append(f'{key}={all_bits}')
return '\n'.join(keyed_bitstrings)
def _key_to_str(key: TMeasurementKey) -> str:
if isinstance(key, str):
return key
if isinstance(key, ops.Qid):
return str(key)
return ','.join(str(q) for q in key)
class Result(abc.ABC):
"""The results of multiple executions of a circuit with fixed parameters."""
def __new__(cls, *args, **kwargs):
if cls is Result:
_warn_or_error(
"Result constructor is deprecated and will be removed in cirq v0.15. "
"Use the ResultDict constructor instead, or another concrete subclass."
)
return ResultDict(*args, **kwargs)
return super().__new__(cls)
@property
@abc.abstractmethod
def params(self) -> 'cirq.ParamResolver':
"""A ParamResolver of settings used for this result."""
@property
@abc.abstractmethod
def measurements(self) -> Mapping[str, np.ndarray]:
"""A mapping from measurement gate key to measurement results.
The value for each key is a 2-D array of booleans, with the first index
running over the repetitions, and the second index running over the
qubits for the corresponding measurements.
"""
@property
@abc.abstractmethod
def records(self) -> Mapping[str, np.ndarray]:
"""A mapping from measurement key to measurement records.
The value for each key is a 3-D array of booleans, with the first index
running over circuit repetitions, the second index running over instances
of the measurement key in the circuit, and the third index running over
the qubits for the corresponding measurements.
"""
@property
@abc.abstractmethod
def data(self) -> pd.DataFrame:
"""Measurements converted to a pandas dataframe.
The rows in the returned data frame correspond to repetitions of the
circuit, and the columns correspond to measurement keys, where each
element is a big-endian integer representation of measurement outcomes
for the measurement key in that repetition. To convert these ints to
bits see `cirq.big_endian_int_to_bits` and similar functions.
"""
@staticmethod
def dataframe_from_measurements(measurements: Mapping[str, np.ndarray]) -> pd.DataFrame:
"""Converts the given measurements to a pandas dataframe.
This can be used by subclasses as a default implementation for the data
property. Note that subclasses should typically memoize the result to
avoid recomputing.
"""
# Convert to a DataFrame with columns as measurement keys, rows as
# repetitions and a big endian integer for individual measurements.
converted_dict = {
key: [value.big_endian_bits_to_int(m_vals) for m_vals in val]
for key, val in measurements.items()
}
# Note that when a numpy array is produced from this data frame,
# Pandas will try to use np.int64 as dtype, but will upgrade to
# object if any value is too large to fit.
return pd.DataFrame(converted_dict, dtype=np.int64)
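# Hedged usage sketch (not part of the cirq source): dataframe_from_measurements packs
# each repetition's bits into one big-endian integer per measurement key. For two
# repetitions of a two-qubit measurement keyed 'm', bits [0, 1] become 1 and [1, 1]
# become 3:
def _example_measurement_frame() -> pd.DataFrame:
    measurements = {'m': np.array([[0, 1], [1, 1]])}
    return Result.dataframe_from_measurements(measurements)  # column 'm' == [1, 3]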
import pandas as pd
from dtd import implied_volatility, implied_mu, merton_pd
## Load the sample data; it has already been prepared in the required format.
data = pd.read_csv("sample_data.csv")
data["trading_date"] = pd.to_datetime(data["trading_date"])
data.set_index("trading_date", inplace=True)
data.sort_index(inplace=True)
equity = data["equity"].values
liab = data["liability"].values
rfr = data["rfr"].values
gap = data["gap"].values
## Estimate mu and sigma
vol, flag = implied_volatility(equity, liab, rfr, gap, term=1)
vol = vol[0]
mu = implied_mu(vol, equity, liab, rfr, gap, term=1)
'''
Compute the implied asset value, DTD, and PD.
Strictly speaking, parameters estimated from the historical sample can only be used to compute the value
for the last day; the PDs for earlier days would each require parameters estimated from the year of data
preceding that day. Below we nevertheless apply the same parameters across the whole history, purely as a
test to inspect the historical variation; this is not the correct procedure.
'''
mertonPD, DTD, impAsset = merton_pd(equity, liab, rfr, vol, mu, term=1)
mertonPD = pd.Series(mertonPD, index=data.index)
DTD = pd.Series(DTD, index=data.index)
impAsset = pd.Series(impAsset, index=data.index)
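# Hedged sketch (assumption): the note above says each day's PD should strictly use
# parameters estimated from the preceding year of data rather than one full-sample fit.
# A rolling re-estimation along those lines might look like this; the window length and
# the reuse of the same dtd helpers on one-day slices are assumptions.
def rolling_merton_pd(data, window=250, term=1):
    pds = {}
    for i in range(window, len(data)):
        chunk = data.iloc[i - window:i]
        e, l, r, g = (chunk["equity"].values, chunk["liability"].values,
                      chunk["rfr"].values, chunk["gap"].values)
        v, _ = implied_volatility(e, l, r, g, term=term)
        m = implied_mu(v[0], e, l, r, g, term=term)
        pd_t, _, _ = merton_pd(e[-1:], l[-1:], r[-1:], v[0], m, term=term)
        pds[chunk.index[-1]] = pd_t[0]
    return pd.Series(pds)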
# core
import re
import os
import glob
import calendar
# installed
import pandas as pd
import numpy as np
from tqdm import tqdm
from concurrent.futures import ProcessPoolExecutor
# custom
import scrape_shortsqueeze as ss
from utils import get_home_dir
HOME_DIR = get_home_dir(repo_name='scrape_stocks')
def fix_truecar_problem(df, f, verbose=False, second_try=False):
# fixes truecar ticker error; is listed as '1' in the data
# did some spreadsheets have 'Company' as the first column?
if 'ShortSqueeze.com Short Interest Data' in df.columns:
df.rename(columns={'ShortSqueeze.com Short Interest Data': 'Company'},
inplace=True)
else:
df.rename(columns={'ShortSqueeze.com™ Short Interest Data': 'Company'},
inplace=True)
tc_idx = df[df['Company'] == 'Truecar Incorporated']
if tc_idx.shape[0] == 0:
if verbose:
print('"Truecar Incorporated" not found in company names, trying "Truecar Inc"')
tc_idx = df[df['Company'] == 'Truecar Inc']
if tc_idx.shape[0] == 0:
print('\n' * 10)
print('"Truecar Inc not found...error"')
print('re-downloading data for', f, '...')
if not second_try:
ss.download_daily_data(date=f.split('/')[-1].split('.')[0])
return False
# usually if this problem is here, there will be a problem at:
# 251 # drop the end junk, and the fully missing rows (usually 2 at the end)
# --> 252 df = df.iloc[:end_idxs[0]]
# 253 df.drop(df.index[df.isnull().all(1)], inplace=True)
# 254 df['Date'] = pd.to_datetime(f.split('/')[-1].split('.')[0])
print('you may want to examine', f)
return True
tc_idx = tc_idx.index[0]
df.at[tc_idx, 'Symbol'] = 'TRUE'
# df.set_value(tc_idx, 'Symbol', 'TRUE') # old way of doing it
return True
def load_parse_excel(f, dates_df, rev_cal_dict, verbose=False):
if verbose:
print(f)
df = pd.read_excel(f)
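# Hedged sketch (assumption): load_parse_excel above is truncated right after reading the
# sheet. Based on the code quoted in fix_truecar_problem's traceback comments, the rest of
# the function very likely fixes the Truecar ticker, trims the junk rows at the bottom of
# the sheet, and stamps each row with the file's date. How end_idxs is located here is an
# assumption, not the original logic.
def _finish_parse(df, f):
    fix_truecar_problem(df, f)
    end_idxs = np.where(df['Company'].isnull())[0]
    if len(end_idxs):
        # drop the end junk, and the fully missing rows (usually 2 at the end)
        df = df.iloc[:end_idxs[0]]
    df.drop(df.index[df.isnull().all(1)], inplace=True)
    df['Date'] = pd.to_datetime(f.split('/')[-1].split('.')[0])
    return df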
# Imports
import os
import json
import pandas as pd
import parse_sc2
parse_sc2.read_sc2_replay("data/rep2.SC2Replay")
replay_dict = parse_sc2.parse_sc2_json("tmp.json")
replay_dataframes = parse_sc2.org_sc2_dict(replay_dict)
output = replay_dataframes['players']
map_title = replay_dataframes['details']['title']
game_time = parse_sc2.get_maptime(replay_dataframes['details']['timeUTC'])
game_version = replay_dataframes['header']['version']['major'] + (replay_dataframes['header']['version']['minor']/100 )
spawn_location = parse_sc2.get_spawn_location(replay_dataframes['tracker'])
player_data = replay_dataframes['players']
# Get spawn locations
# Get metadata for matchup -- patch, etc
plotdf = replay_dataframes['tracker']
zzz = parse_sc2.get_first_unit_time(plotdf, unitList = ['Nexus', 'CommandCenter', 'Hatchery'])
parse_sc2.get_first_unit_time(plotdf, unitList = ['EngineeringBay', 'Forge', 'EvolutionChamber'])
parse_sc2.get_first_unit_time(plotdf, unitList = ['Spire', 'Stargate', 'Starport'])
parse_sc2.get_first_unit_time(plotdf, unitList = ['Refinery', 'Extractor', 'Assimilator'])
parse_sc2.get_first_unit_time(plotdf, unitList = ['Factory', 'RoboticsFacility', 'RoachWarren'])
#plotdf = replay_dataframes["stats"]
#plotdf = replay_dataframes["gameevent"]
#plotdf = replay_dataframes["tracker"]
# Do this by player, get minimum loop value
myvalues = {'unitTypeName': ['Barracks', 'Gateway', 'SpawningPool']}
plotdf = plotdf[plotdf['unitTypeName'].isin(["Barracks", "Gateway", "SpawningPool"])]
plotdf.groupby('controlPlayerId')['loop'].min()
unitList = ['Refinery', 'Extractor', 'Assimilator']
parse_sc2.get_timings(plotdf, unitStub = "gasTiming", unitList = unitList, n = 4)
unitList = ['Nexus', 'CommandCenter', 'Hatchery']
parse_sc2.get_timings(plotdf, unitStub = "townHallTiming", unitList = unitList, n = 2)
plotdf = plotdf[plotdf['unitTypeName'].isin(unitList)]
out = plotdf.groupby('controlPlayerId')['loop'].unique()
out = pd.DataFrame(out)
out = out.loop.apply(pd.Series)
out = out.reset_index()
["gasTiming_" + s for s in [str(x) for x in range(1, 8)]]
out.columns = [["playerId"] + ["gasTiming_" + s for s in [str(x) for x in range(1, 8)]]]
# Write a function to get stats at certain values
l = list(range(1,22))
l = [480*x for x in l]
def get_econ_timing(data, timeList, var):
data = data[data['loop'].isin(timeList)]
var = ['playerId', 'loop'] + var
data = data.loc[:,var]
data = pd.wide_to_long(data, ["scoreValue"], i=["playerId", 'loop'], j="score",
suffix = "\\D+")
data.reset_index(inplace=True)
return(data)
scoreIndex = ['scoreValueMineralsUsedCurrentEconomy',
'scoreValueVespeneUsedCurrentEconomy',
'scoreValueWorkersActiveCount',
'scoreValueFoodMade',
'scoreValueFoodUsed',
'scoreValueMineralsUsedCurrentTechnology',
'scoreValueVespeneLostArmy',
'scoreValueMineralsLostArmy',
'scoreValueVespeneKilledArmy',
'scoreValueMineralsKilledArmy']
plotdf = replay_dataframes["stats"]
zzz = get_econ_timing(plotdf, timeList = l, var = scoreIndex)
zzz = zzz.pivot_table(index = 'playerId', columns = ['score', 'loop'])
zzz = zzz.reset_index()
# Loop < 5760 = first 6 minutes of the game
# For each player we want to create a dataset with attributes like
# Player race
# Opponent race
# Spawn location (top, bottom)
# Map
# Map size
# Time of first barracks, spawning pool, gateway
# Time of first spire, stargate, starport
# Time of first robotics, factory, or roach warren /hydralisk den?
#
# Time of first expansion
# Time of second expansion
# Time of first gas
# Time of second gas geyser
# Time of third gas geyser
# Supply at increments of 30 seconds
# Damage taken at increments of 30 seconds
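# Hedged sketch (assumption): one way to assemble the per-player feature row listed above,
# reusing the custom parse_sc2 timing helpers already called in this script. The shape of
# the frame returned by get_timings and the "playerId" merge key are assumptions based on
# the exploratory output above.
def build_player_features(tracker_df):
    gas = parse_sc2.get_timings(tracker_df, unitStub="gasTiming",
                                unitList=['Refinery', 'Extractor', 'Assimilator'], n=3)
    halls = parse_sc2.get_timings(tracker_df, unitStub="townHallTiming",
                                  unitList=['Nexus', 'CommandCenter', 'Hatchery'], n=2)
    feats = gas.merge(halls, on="playerId", how="outer")
    # Map name, spawn location and race (collected earlier) can then be joined on player id.
    return feats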
### Exploratory stuff
replay_dict['GameEvts'][320:321]
replay_dict['Details']['Struct']['timeUTC']
replay_dict['Details']['Struct']['timeLocalOffset']
# Game Event Data
ge_df = pd.DataFrame(replay_dict['GameEvts'])
ge_df = pd.concat([ge_df.loc[:,['ID','Name']], ge_df['Struct'].apply(pd.Series)],
axis = 1)
# pd.value_counts(ge_df['name'])
# Game events contains camera updates, user selections,
# pings
# control group updates
# Probably joins with te_df by loop
# What defines a unique row here?
# Tracker data
te_df = pd.DataFrame(replay_dict['TrackerEvts']['Evts'])
# Expand the struct into multiple columns, filling in empty values
te_df = pd.concat([te_df.loc[:,['ID','Name']], te_df['Struct'].apply(pd.Series)],
axis = 1)
# Explode out the stats attribute
stats_df = te_df['stats'].apply(pd.Series)
stats_df = pd.concat([te_df.loc[:,['ID', 'Name', 'loop', 'playerId']], stats_df], axis = 1)
del stats_df[0]
stats_df = stats_df.sort_values(by = "loop")
# te_df contains tracker data
# contains
# UnitBorn UnitDied
# PlayerStats
# Upgrade
# Unit Positions
# pd.DataFrame.hist(ge_df, column = "loop")
# can join with loop as the primary key to ge_df
# What defines a unique row here?
# Player data
player_df = pd.DataFrame(replay_dict['Metadata']['Struct']['Players'])
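# Hedged sketch (assumption): the metadata player list itself contains nested fields.
# Mirroring the Struct-expansion pattern used for the game/tracker events above, any
# dict-valued column can be flattened the same way; which columns are actually nested in
# this replay format is not assumed here.
for nested_col in list(player_df.columns):
    if player_df[nested_col].apply(lambda v: isinstance(v, dict)).any():
        expanded = player_df[nested_col].apply(pd.Series).add_prefix(nested_col + "_")
        player_df = pd.concat([player_df.drop(columns=[nested_col]), expanded], axis=1)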
# Author: <NAME>
#
# License: BSD 3 clause
import logging
import numpy as np
import pandas as pd
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import strip_tags
import umap
import hdbscan
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from joblib import dump, load
from sklearn.cluster import dbscan
import tempfile
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import normalize
from scipy.special import softmax
try:
import hnswlib
_HAVE_HNSWLIB = True
except ImportError:
_HAVE_HNSWLIB = False
try:
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text
_HAVE_TENSORFLOW = True
except ImportError:
_HAVE_TENSORFLOW = False
try:
from sentence_transformers import SentenceTransformer
_HAVE_TORCH = True
except ImportError:
_HAVE_TORCH = False
logger = logging.getLogger('top2vec')
logger.setLevel(logging.WARNING)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(sh)
def default_tokenizer(doc):
"""Tokenize documents for training and remove too long/short words"""
return simple_preprocess(strip_tags(doc), deacc=True)
class Top2Vec:
"""
Top2Vec
Creates jointly embedded topic, document and word vectors.
Parameters
----------
embedding_model: string
This will determine which model is used to generate the document and
word embeddings. The valid string options are:
* doc2vec
* universal-sentence-encoder
* universal-sentence-encoder-multilingual
* distiluse-base-multilingual-cased
For large data sets and data sets with very unique vocabulary doc2vec
could produce better results. This will train a doc2vec model from
scratch. This method is language agnostic. However multiple languages
will not be aligned.
Using the universal sentence encoder options will be much faster since
those are pre-trained and efficient models. The universal sentence
encoder options are suggested for smaller data sets. They are also
good options for large data sets that are in English or in languages
covered by the multilingual model. It is also suggested for data sets
that are multilingual.
For more information on universal-sentence-encoder visit:
https://tfhub.dev/google/universal-sentence-encoder/4
For more information on universal-sentence-encoder-multilingual visit:
https://tfhub.dev/google/universal-sentence-encoder-multilingual/3
The distiluse-base-multilingual-cased pre-trained sentence transformer
is suggested for multilingual datasets and languages that are not
covered by the multilingual universal sentence encoder. The
transformer is significantly slower than the universal sentence
encoder options.
For more information on distiluse-base-multilingual-cased visit:
https://www.sbert.net/docs/pretrained_models.html
embedding_model_path: string (Optional)
Pre-trained embedding models will be downloaded automatically by
default. However they can also be uploaded from a file that is in the
location of embedding_model_path.
Warning: the model at embedding_model_path must match the
embedding_model parameter type.
documents: List of str
Input corpus, should be a list of strings.
min_count: int (Optional, default 50)
Ignores all words with total frequency lower than this. For smaller
corpora a smaller min_count will be necessary.
speed: string (Optional, default 'learn')
This parameter is only used when using doc2vec as embedding_model.
It will determine how fast the model takes to train. The
fast-learn option is the fastest and will generate the lowest quality
vectors. The learn option will learn better quality vectors but take
a longer time to train. The deep-learn option will learn the best
quality vectors but will take significant time to train. The valid
string speed options are:
* fast-learn
* learn
* deep-learn
use_corpus_file: bool (Optional, default False)
This parameter is only used when using doc2vec as embedding_model.
Setting use_corpus_file to True can sometimes provide speedup for
large datasets when multiple worker threads are available. Documents
are still passed to the model as a list of str, the model will create
a temporary corpus file for training.
document_ids: List of str, int (Optional)
A unique value per document that will be used for referring to
documents in search results. If ids are not given to the model, the
index of each document in the original corpus will become the id.
keep_documents: bool (Optional, default True)
If set to False documents will only be used for training and not saved
as part of the model. This will reduce model size. When using search
functions only document ids will be returned, not the actual
documents.
workers: int (Optional)
The amount of worker threads to be used in training the model. Larger
amount will lead to faster training.
tokenizer: callable (Optional, default None)
Override the default tokenization method. If None then
gensim.utils.simple_preprocess will be used.
use_embedding_model_tokenizer: bool (Optional, default False)
If using an embedding model other than doc2vec, use the model's
tokenizer for document embedding. If set to True the tokenizer, either
default or passed callable will be used to tokenize the text to
extract the vocabulary for word embedding.
verbose: bool (Optional, default True)
Whether to print status data during training.
"""
def __init__(self,
documents,
min_count=50,
embedding_model='doc2vec',
embedding_model_path=None,
speed='learn',
use_corpus_file=False,
document_ids=None,
keep_documents=True,
workers=None,
tokenizer=None,
use_embedding_model_tokenizer=False,
verbose=True,
umap_args=None,
hdbscan_args=None):
if verbose:
logger.setLevel(logging.DEBUG)
self.verbose = True
else:
logger.setLevel(logging.WARNING)
self.verbose = False
if tokenizer is not None:
self._tokenizer = tokenizer
else:
self._tokenizer = default_tokenizer
# validate documents
if not (isinstance(documents, list) or isinstance(documents, np.ndarray)):
raise ValueError("Documents need to be a list of strings")
if not all((isinstance(doc, str) or isinstance(doc, np.str_)) for doc in documents):
raise ValueError("Documents need to be a list of strings")
if keep_documents:
self.documents = np.array(documents, dtype="object")
else:
self.documents = None
# validate document ids
if document_ids is not None:
if not (isinstance(document_ids, list) or isinstance(document_ids, np.ndarray)):
raise ValueError("Documents ids need to be a list of str or int")
if len(documents) != len(document_ids):
raise ValueError("Document ids need to match number of documents")
elif len(document_ids) != len(set(document_ids)):
raise ValueError("Document ids need to be unique")
if all((isinstance(doc_id, str) or isinstance(doc_id, np.str_)) for doc_id in document_ids):
self.doc_id_type = np.str_
elif all((isinstance(doc_id, int) or isinstance(doc_id, np.int_)) for doc_id in document_ids):
self.doc_id_type = np.int_
else:
raise ValueError("Document ids need to be str or int")
self.document_ids_provided = True
self.document_ids = np.array(document_ids)
self.doc_id2index = dict(zip(document_ids, list(range(0, len(document_ids)))))
else:
self.document_ids_provided = False
self.document_ids = np.array(range(0, len(documents)))
self.doc_id2index = dict(zip(self.document_ids, list(range(0, len(self.document_ids)))))
self.doc_id_type = np.int_
acceptable_embedding_models = ["universal-sentence-encoder-multilingual",
"universal-sentence-encoder",
"distiluse-base-multilingual-cased"]
self.embedding_model_path = embedding_model_path
if embedding_model == 'doc2vec':
# validate training inputs
if speed == "fast-learn":
hs = 0
negative = 5
epochs = 40
elif speed == "learn":
hs = 1
negative = 0
epochs = 40
elif speed == "deep-learn":
hs = 1
negative = 0
epochs = 400
elif speed == "test-learn":
hs = 0
negative = 5
epochs = 1
else:
raise ValueError("speed parameter needs to be one of: fast-learn, learn or deep-learn")
if workers is None:
pass
elif isinstance(workers, int):
pass
else:
raise ValueError("workers needs to be an int")
doc2vec_args = {"vector_size": 300,
"min_count": min_count,
"window": 15,
"sample": 1e-5,
"negative": negative,
"hs": hs,
"epochs": epochs,
"dm": 0,
"dbow_words": 1}
if workers is not None:
doc2vec_args["workers"] = workers
logger.info('Pre-processing documents for training')
if use_corpus_file:
processed = [' '.join(self._tokenizer(doc)) for doc in documents]
lines = "\n".join(processed)
temp = tempfile.NamedTemporaryFile(mode='w+t')
temp.write(lines)
doc2vec_args["corpus_file"] = temp.name
else:
train_corpus = [TaggedDocument(self._tokenizer(doc), [i]) for i, doc in enumerate(documents)]
doc2vec_args["documents"] = train_corpus
logger.info('Creating joint document/word embedding')
self.embedding_model = 'doc2vec'
self.model = Doc2Vec(**doc2vec_args)
if use_corpus_file:
temp.close()
elif embedding_model in acceptable_embedding_models:
self.embed = None
self.embedding_model = embedding_model
self._check_import_status()
logger.info('Pre-processing documents for training')
# preprocess documents
train_corpus = [' '.join(self._tokenizer(doc)) for doc in documents]
# preprocess vocabulary
vectorizer = CountVectorizer()
doc_word_counts = vectorizer.fit_transform(train_corpus)
words = vectorizer.get_feature_names()
word_counts = np.array(np.sum(doc_word_counts, axis=0).tolist()[0])
vocab_inds = np.where(word_counts > min_count)[0]
if len(vocab_inds) == 0:
raise ValueError(f"A min_count of {min_count} results in "
f"all words being ignored, choose a lower value.")
self.vocab = [words[ind] for ind in vocab_inds]
self._check_model_status()
logger.info('Creating joint document/word embedding')
# embed words
self.word_indexes = dict(zip(self.vocab, range(len(self.vocab))))
self.word_vectors = self._l2_normalize(np.array(self.embed(self.vocab)))
# embed documents
if use_embedding_model_tokenizer:
self.document_vectors = self._embed_documents(documents)
else:
self.document_vectors = self._embed_documents(train_corpus)
else:
raise ValueError(f"{embedding_model} is an invalid embedding model.")
# create 5D embeddings of documents
logger.info('Creating lower dimension embedding of documents')
self.umap_args = {'n_neighbors': 15,
'n_components': 5,
'metric': 'cosine'} if umap_args is None else umap_args
umap_model = umap.UMAP(**self.umap_args).fit(self._get_document_vectors(norm=False))
# find dense areas of document vectors
logger.info('Finding dense areas of documents')
self.hdbscan_args = {'min_cluster_size': 15,
'metric': 'euclidean',
'cluster_selection_method': 'eom'} if hdbscan_args is None else hdbscan_args
cluster = hdbscan.HDBSCAN(**self.hdbscan_args).fit(umap_model.embedding_)
# calculate topic vectors from dense areas of documents
logger.info('Finding topics')
# create topic vectors
self._create_topic_vectors(cluster.labels_)
# deduplicate topics
self._deduplicate_topics()
# find topic words and scores
self.topic_words, self.topic_word_scores = self._find_topic_words_and_scores(topic_vectors=self.topic_vectors)
# assign documents to topic
self.doc_top, self.doc_dist = self._calculate_documents_topic(self.topic_vectors,
self._get_document_vectors())
# calculate topic sizes
self.topic_sizes = self._calculate_topic_sizes(hierarchy=False)
# re-order topics
self._reorder_topics(hierarchy=False)
# initialize variables for hierarchical topic reduction
self.topic_vectors_reduced = None
self.doc_top_reduced = None
self.doc_dist_reduced = None
self.topic_sizes_reduced = None
self.topic_words_reduced = None
self.topic_word_scores_reduced = None
self.hierarchy = None
# initialize document indexing variables
self.document_index = None
self.serialized_document_index = None
self.documents_indexed = False
self.index_id2doc_id = None
self.doc_id2index_id = None
# initialize word indexing variables
self.word_index = None
self.serialized_word_index = None
self.words_indexed = False
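    # Usage sketch (assumption: `docs` is a list of document strings; parameter values are
    # illustrative only):
    #
    #     model = Top2Vec(documents=docs, speed="learn", workers=8)
    #     model_use = Top2Vec(documents=docs,
    #                         embedding_model="universal-sentence-encoder")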
def save(self, file):
"""
Saves the current model to the specified file.
Parameters
----------
file: str
File where model will be saved.
"""
# do not save sentence encoders and sentence transformers
if self.embedding_model != "doc2vec":
self.embed = None
# serialize document index so that it can be saved
if self.documents_indexed:
temp = tempfile.NamedTemporaryFile(mode='w+b')
self.document_index.save_index(temp.name)
self.serialized_document_index = temp.read()
temp.close()
self.document_index = None
# serialize word index so that it can be saved
if self.words_indexed:
temp = tempfile.NamedTemporaryFile(mode='w+b')
self.word_index.save_index(temp.name)
self.serialized_word_index = temp.read()
temp.close()
self.word_index = None
dump(self, file)
@classmethod
def load(cls, file):
"""
Load a pre-trained model from the specified file.
Parameters
----------
file: str
File where model will be loaded from.
"""
top2vec_model = load(file)
# load document index
if top2vec_model.documents_indexed:
if not _HAVE_HNSWLIB:
raise ImportError(f"Cannot load document index.\n\n"
"Try: pip install top2vec[indexing]\n\n"
"Alternatively try: pip install hnswlib")
temp = tempfile.NamedTemporaryFile(mode='w+b')
temp.write(top2vec_model.serialized_document_index)
if top2vec_model.embedding_model == 'doc2vec':
document_vectors = top2vec_model.model.docvecs.vectors_docs
else:
document_vectors = top2vec_model.document_vectors
top2vec_model.document_index = hnswlib.Index(space='ip',
dim=document_vectors.shape[1])
top2vec_model.document_index.load_index(temp.name, max_elements=document_vectors.shape[0])
temp.close()
top2vec_model.serialized_document_index = None
# load word index
if top2vec_model.words_indexed:
if not _HAVE_HNSWLIB:
raise ImportError(f"Cannot load word index.\n\n"
"Try: pip install top2vec[indexing]\n\n"
"Alternatively try: pip install hnswlib")
temp = tempfile.NamedTemporaryFile(mode='w+b')
temp.write(top2vec_model.serialized_word_index)
if top2vec_model.embedding_model == 'doc2vec':
word_vectors = top2vec_model.model.wv.vectors
else:
word_vectors = top2vec_model.word_vectors
top2vec_model.word_index = hnswlib.Index(space='ip',
dim=word_vectors.shape[1])
top2vec_model.word_index.load_index(temp.name, max_elements=word_vectors.shape[0])
temp.close()
top2vec_model.serialized_word_index = None
return top2vec_model
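    # Usage sketch, assuming `model` is a trained Top2Vec instance and the path is writable:
    #
    #     model.save("top2vec.model")
    #     model = Top2Vec.load("top2vec.model")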
@staticmethod
def _l2_normalize(vectors):
if vectors.ndim == 2:
return normalize(vectors)
else:
return normalize(vectors.reshape(1, -1))[0]
def _embed_documents(self, train_corpus):
self._check_import_status()
self._check_model_status()
# embed documents
batch_size = 500
document_vectors = []
current = 0
batches = int(len(train_corpus) / batch_size)
extra = len(train_corpus) % batch_size
for ind in range(0, batches):
document_vectors.append(self.embed(train_corpus[current:current + batch_size]))
current += batch_size
if extra > 0:
document_vectors.append(self.embed(train_corpus[current:current + extra]))
document_vectors = self._l2_normalize(np.array(np.vstack(document_vectors)))
return document_vectors
def _set_document_vectors(self, document_vectors):
if self.embedding_model == 'doc2vec':
self.model.docvecs.vectors_docs = document_vectors
else:
self.document_vectors = document_vectors
def _get_document_vectors(self, norm=True):
if self.embedding_model == 'doc2vec':
if norm:
self.model.docvecs.init_sims()
return self.model.docvecs.vectors_docs_norm
else:
return self.model.docvecs.vectors_docs
else:
return self.document_vectors
def _index2word(self, index):
if self.embedding_model == 'doc2vec':
return self.model.wv.index2word[index]
else:
return self.vocab[index]
def _get_word_vectors(self):
if self.embedding_model == 'doc2vec':
self.model.wv.init_sims()
return self.model.wv.vectors_norm
else:
return self.word_vectors
def _create_topic_vectors(self, cluster_labels):
unique_labels = set(cluster_labels)
if -1 in unique_labels:
unique_labels.remove(-1)
self.topic_vectors = self._l2_normalize(
np.vstack([self._get_document_vectors(norm=False)[np.where(cluster_labels == label)[0]]
.mean(axis=0) for label in unique_labels]))
def _deduplicate_topics(self):
core_samples, labels = dbscan(X=self.topic_vectors,
eps=0.1,
min_samples=2,
metric="cosine")
duplicate_clusters = set(labels)
if len(duplicate_clusters) > 1 or -1 not in duplicate_clusters:
# unique topics
unique_topics = self.topic_vectors[np.where(labels == -1)[0]]
if -1 in duplicate_clusters:
duplicate_clusters.remove(-1)
# merge duplicate topics
for unique_label in duplicate_clusters:
unique_topics = np.vstack(
[unique_topics, self._l2_normalize(self.topic_vectors[np.where(labels == unique_label)[0]]
.mean(axis=0))])
self.topic_vectors = unique_topics
def _calculate_topic_sizes(self, hierarchy=False):
if hierarchy:
topic_sizes = pd.Series(self.doc_top_reduced).value_counts()
else:
topic_sizes = pd.Series(self.doc_top).value_counts()
return topic_sizes
def _reorder_topics(self, hierarchy=False):
if hierarchy:
self.topic_vectors_reduced = self.topic_vectors_reduced[self.topic_sizes_reduced.index]
self.topic_words_reduced = self.topic_words_reduced[self.topic_sizes_reduced.index]
self.topic_word_scores_reduced = self.topic_word_scores_reduced[self.topic_sizes_reduced.index]
old2new = dict(zip(self.topic_sizes_reduced.index, range(self.topic_sizes_reduced.index.shape[0])))
self.doc_top_reduced = np.array([old2new[i] for i in self.doc_top_reduced])
self.hierarchy = [self.hierarchy[i] for i in self.topic_sizes_reduced.index]
self.topic_sizes_reduced.reset_index(drop=True, inplace=True)
else:
self.topic_vectors = self.topic_vectors[self.topic_sizes.index]
self.topic_words = self.topic_words[self.topic_sizes.index]
self.topic_word_scores = self.topic_word_scores[self.topic_sizes.index]
old2new = dict(zip(self.topic_sizes.index, range(self.topic_sizes.index.shape[0])))
self.doc_top = np.array([old2new[i] for i in self.doc_top])
self.topic_sizes.reset_index(drop=True, inplace=True)
@staticmethod
def _calculate_documents_topic(topic_vectors, document_vectors, dist=True):
batch_size = 10000
doc_top = []
if dist:
doc_dist = []
if document_vectors.shape[0] > batch_size:
current = 0
batches = int(document_vectors.shape[0] / batch_size)
extra = document_vectors.shape[0] % batch_size
for ind in range(0, batches):
res = np.inner(document_vectors[current:current + batch_size], topic_vectors)
doc_top.extend(np.argmax(res, axis=1))
if dist:
doc_dist.extend(np.max(res, axis=1))
current += batch_size
if extra > 0:
res = np.inner(document_vectors[current:current + extra], topic_vectors)
doc_top.extend(np.argmax(res, axis=1))
if dist:
doc_dist.extend(np.max(res, axis=1))
if dist:
doc_dist = np.array(doc_dist)
else:
res = np.inner(document_vectors, topic_vectors)
doc_top = np.argmax(res, axis=1)
if dist:
doc_dist = np.max(res, axis=1)
if dist:
return doc_top, doc_dist
else:
return doc_top
def _find_topic_words_and_scores(self, topic_vectors):
topic_words = []
topic_word_scores = []
res = np.inner(topic_vectors, self._get_word_vectors())
top_words = np.flip(np.argsort(res, axis=1), axis=1)
top_scores = np.flip(np.sort(res, axis=1), axis=1)
for words, scores in zip(top_words, top_scores):
topic_words.append([self._index2word(i) for i in words[0:50]])
topic_word_scores.append(scores[0:50])
topic_words = np.array(topic_words)
topic_word_scores = np.array(topic_word_scores)
return topic_words, topic_word_scores
def _assign_documents_to_topic(self, document_vectors, hierarchy=False):
if hierarchy:
doc_top_new, doc_dist_new = self._calculate_documents_topic(self.topic_vectors_reduced,
document_vectors,
dist=True)
self.doc_top_reduced = np.append(self.doc_top_reduced, doc_top_new)
self.doc_dist_reduced = np.append(self.doc_dist_reduced, doc_dist_new)
topic_sizes_new = pd.Series(doc_top_new).value_counts()
for top in topic_sizes_new.index.tolist():
self.topic_sizes_reduced[top] += topic_sizes_new[top]
self.topic_sizes_reduced.sort_values(ascending=False, inplace=True)
self._reorder_topics(hierarchy)
else:
doc_top_new, doc_dist_new = self._calculate_documents_topic(self.topic_vectors, document_vectors, dist=True)
self.doc_top = np.append(self.doc_top, doc_top_new)
self.doc_dist = np.append(self.doc_dist, doc_dist_new)
topic_sizes_new = pd.Series(doc_top_new).value_counts()
for top in topic_sizes_new.index.tolist():
self.topic_sizes[top] += topic_sizes_new[top]
self.topic_sizes.sort_values(ascending=False, inplace=True)
self._reorder_topics(hierarchy)
def _unassign_documents_from_topic(self, doc_indexes, hierarchy=False):
if hierarchy:
doc_top_remove = self.doc_top_reduced[doc_indexes]
self.doc_top_reduced = np.delete(self.doc_top_reduced, doc_indexes, 0)
self.doc_dist_reduced = np.delete(self.doc_dist_reduced, doc_indexes, 0)
topic_sizes_remove = pd.Series(doc_top_remove).value_counts()
for top in topic_sizes_remove.index.tolist():
self.topic_sizes_reduced[top] -= topic_sizes_remove[top]
self.topic_sizes_reduced.sort_values(ascending=False, inplace=True)
self._reorder_topics(hierarchy)
else:
doc_top_remove = self.doc_top[doc_indexes]
self.doc_top = np.delete(self.doc_top, doc_indexes, 0)
self.doc_dist = np.delete(self.doc_dist, doc_indexes, 0)
topic_sizes_remove = pd.Series(doc_top_remove).value_counts()
for top in topic_sizes_remove.index.tolist():
self.topic_sizes[top] -= topic_sizes_remove[top]
self.topic_sizes.sort_values(ascending=False, inplace=True)
self._reorder_topics(hierarchy)
def _get_document_ids(self, doc_index):
return self.document_ids[doc_index]
def _get_document_indexes(self, doc_ids):
if self.document_ids is None:
return doc_ids
else:
return [self.doc_id2index[doc_id] for doc_id in doc_ids]
def _words2word_vectors(self, keywords):
return self._get_word_vectors()[[self._word2index(word) for word in keywords]]
def _word2index(self, word):
if self.embedding_model == 'doc2vec':
return self.model.wv.vocab[word].index
else:
return self.word_indexes[word]
def _get_combined_vec(self, vecs, vecs_neg):
combined_vector = np.zeros(self._get_document_vectors().shape[1], dtype=np.float64)
for vec in vecs:
combined_vector += vec
for vec in vecs_neg:
combined_vector -= vec
combined_vector /= (len(vecs) + len(vecs_neg))
combined_vector = self._l2_normalize(combined_vector)
return combined_vector
@staticmethod
def _search_vectors_by_vector(vectors, vector, num_res):
ranks = np.inner(vectors, vector)
indexes = np.flip(np.argsort(ranks)[-num_res:])
scores = np.array([ranks[res] for res in indexes])
return indexes, scores
@staticmethod
def _check_hnswlib_status():
if not _HAVE_HNSWLIB:
raise ImportError(f"Indexing is not available.\n\n"
"Try: pip install top2vec[indexing]\n\n"
"Alternatively try: pip install hnswlib")
def _check_document_index_status(self):
if self.document_index is None:
raise ImportError("There is no document index.\n\n"
"Call index_document_vectors method before setting use_index=True.")
def _check_word_index_status(self):
if self.word_index is None:
raise ImportError("There is no word index.\n\n"
"Call index_word_vectors method before setting use_index=True.")
def _check_import_status(self):
if self.embedding_model != 'distiluse-base-multilingual-cased':
if not _HAVE_TENSORFLOW:
raise ImportError(f"{self.embedding_model} is not available.\n\n"
"Try: pip install top2vec[sentence_encoders]\n\n"
"Alternatively try: pip install tensorflow tensorflow_hub tensorflow_text")
else:
if not _HAVE_TORCH:
raise ImportError(f"{self.embedding_model} is not available.\n\n"
"Try: pip install top2vec[sentence_transformers]\n\n"
"Alternatively try: pip install torch sentence_transformers")
def _check_model_status(self):
if self.embed is None:
if self.verbose is False:
logger.setLevel(logging.DEBUG)
if self.embedding_model != "distiluse-base-multilingual-cased":
if self.embedding_model_path is None:
logger.info(f'Downloading {self.embedding_model} model')
if self.embedding_model == "universal-sentence-encoder-multilingual":
module = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
else:
module = "https://tfhub.dev/google/universal-sentence-encoder/4"
else:
logger.info(f'Loading {self.embedding_model} model at {self.embedding_model_path}')
module = self.embedding_model_path
self.embed = hub.load(module)
else:
if self.embedding_model_path is None:
logger.info(f'Downloading {self.embedding_model} model')
module = 'distiluse-base-multilingual-cased'
else:
logger.info(f'Loading {self.embedding_model} model at {self.embedding_model_path}')
module = self.embedding_model_path
model = SentenceTransformer(module)
self.embed = model.encode
if self.verbose is False:
logger.setLevel(logging.WARNING)
@staticmethod
def _less_than_zero(num, var_name):
if num < 0:
raise ValueError(f"{var_name} cannot be less than 0.")
def _validate_hierarchical_reduction(self, reduced):
        if reduced is None:  # reduced was not specified by the user: infer it from whether reduction was performed
return self.hierarchy is not None
elif reduced and self.hierarchy is None:
raise ValueError("Hierarchical topic reduction has not been performed.")
else:
return reduced
def _validate_hierarchical_reduction_num_topics(self, num_topics):
current_num_topics = len(self.topic_vectors)
if num_topics >= current_num_topics:
raise ValueError(f"Number of topics must be less than {current_num_topics}.")
def _validate_num_docs(self, num_docs):
self._less_than_zero(num_docs, "num_docs")
document_count = len(self.doc_top)
if num_docs > document_count:
raise ValueError(f"num_docs cannot exceed the number of documents: {document_count}.")
def _validate_num_topics(self, num_topics, reduced):
self._less_than_zero(num_topics, "num_topics")
if reduced:
topic_count = len(self.topic_vectors_reduced)
if num_topics > topic_count:
raise ValueError(f"num_topics cannot exceed the number of reduced topics: {topic_count}.")
else:
topic_count = len(self.topic_vectors)
if num_topics > topic_count:
raise ValueError(f"num_topics cannot exceed the number of topics: {topic_count}.")
def _validate_topic_num(self, topic_num, reduced):
self._less_than_zero(topic_num, "topic_num")
if reduced:
topic_count = len(self.topic_vectors_reduced) - 1
if topic_num > topic_count:
raise ValueError(f"Invalid topic number: valid reduced topics numbers are 0 to {topic_count}.")
else:
topic_count = len(self.topic_vectors) - 1
if topic_num > topic_count:
raise ValueError(f"Invalid topic number: valid original topics numbers are 0 to {topic_count}.")
def _validate_topic_search(self, topic_num, num_docs, reduced):
self._less_than_zero(num_docs, "num_docs")
if reduced:
if num_docs > self.topic_sizes_reduced[topic_num]:
raise ValueError(f"Invalid number of documents: reduced topic {topic_num}"
f" only has {self.topic_sizes_reduced[topic_num]} documents.")
else:
if num_docs > self.topic_sizes[topic_num]:
raise ValueError(f"Invalid number of documents: original topic {topic_num}"
f" only has {self.topic_sizes[topic_num]} documents.")
def _validate_doc_ids(self, doc_ids, doc_ids_neg):
if not (isinstance(doc_ids, list) or isinstance(doc_ids, np.ndarray)):
raise ValueError("doc_ids must be a list of string or int.")
if not (isinstance(doc_ids_neg, list) or isinstance(doc_ids_neg, np.ndarray)):
raise ValueError("doc_ids_neg must be a list of string or int.")
if isinstance(doc_ids, np.ndarray):
doc_ids = list(doc_ids)
if isinstance(doc_ids_neg, np.ndarray):
doc_ids_neg = list(doc_ids_neg)
doc_ids_all = doc_ids + doc_ids_neg
for doc_id in doc_ids_all:
if self.document_ids is not None:
if doc_id not in self.document_ids:
raise ValueError(f"{doc_id} is not a valid document id.")
elif doc_id < 0 or doc_id > len(self.doc_top) - 1:
raise ValueError(f"{doc_id} is not a valid document id.")
def _validate_keywords(self, keywords, keywords_neg):
if not (isinstance(keywords, list) or isinstance(keywords, np.ndarray)):
raise ValueError("keywords must be a list of strings.")
if not (isinstance(keywords_neg, list) or isinstance(keywords_neg, np.ndarray)):
raise ValueError("keywords_neg must be a list of strings.")
keywords_lower = [keyword.lower() for keyword in keywords]
keywords_neg_lower = [keyword.lower() for keyword in keywords_neg]
if self.embedding_model == 'doc2vec':
vocab = self.model.wv.vocab
else:
vocab = self.vocab
for word in keywords_lower + keywords_neg_lower:
if word not in vocab:
raise ValueError(f"'{word}' has not been learned by the model so it cannot be searched.")
return keywords_lower, keywords_neg_lower
def _validate_document_ids_add_doc(self, documents, document_ids):
if document_ids is None:
raise ValueError("Document ids need to be provided.")
if len(documents) != len(document_ids):
raise ValueError("Document ids need to match number of documents.")
if len(document_ids) != len(set(document_ids)):
raise ValueError("Document ids need to be unique.")
if len(set(document_ids).intersection(self.document_ids)) > 0:
raise ValueError("Some document ids already exist in model.")
if self.doc_id_type == np.str_:
if not all((isinstance(doc_id, str) or isinstance(doc_id, np.str_)) for doc_id in document_ids):
raise ValueError("Document ids need to be of type str.")
if self.doc_id_type == np.int_:
if not all((isinstance(doc_id, int) or isinstance(doc_id, np.int_)) for doc_id in document_ids):
raise ValueError("Document ids need to be of type int.")
@staticmethod
def _validate_documents(documents):
if not all((isinstance(doc, str) or isinstance(doc, np.str_)) for doc in documents):
raise ValueError("Documents need to be a list of strings.")
def _validate_vector(self, vector):
if not isinstance(vector, np.ndarray):
raise ValueError("Vector needs to be a numpy array.")
vec_size = self._get_document_vectors().shape[1]
if not vector.shape[0] == vec_size:
raise ValueError(f"Vector needs to be of {vec_size} dimensions.")
def index_document_vectors(self, ef_construction=200, M=64):
"""
Creates an index of the document vectors using hnswlib. This will
lead to faster search times for models with a large number of
documents.
For more information on hnswlib see: https://github.com/nmslib/hnswlib
Parameters
----------
ef_construction: int (Optional default 200)
This parameter controls the trade-off between index construction
time and index accuracy. Larger values will lead to greater
accuracy but will take longer to construct.
M: int (Optional default 64)
This parameter controls the trade-off between both index size as
well as construction time and accuracy. Larger values will lead to
greater accuracy but will result in a larger index as well as
longer construction time.
For more information on the parameters see:
https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md
"""
self._check_hnswlib_status()
document_vectors = self._get_document_vectors()
vec_dim = document_vectors.shape[1]
num_vecs = document_vectors.shape[0]
index_ids = list(range(0, len(self.document_ids)))
self.index_id2doc_id = dict(zip(index_ids, self.document_ids))
self.doc_id2index_id = dict(zip(self.document_ids, index_ids))
self.document_index = hnswlib.Index(space='ip', dim=vec_dim)
self.document_index.init_index(max_elements=num_vecs, ef_construction=ef_construction, M=M)
self.document_index.add_items(document_vectors, index_ids)
self.documents_indexed = True
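    # Usage sketch, assuming hnswlib is installed: build the index once after training,
    # then pass use_index=True to the document search methods.
    #
    #     model.index_document_vectors(ef_construction=200, M=64)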
def index_word_vectors(self, ef_construction=200, M=64):
"""
Creates an index of the word vectors using hnswlib. This will
lead to faster search times for models with a large number of
words.
For more information on hnswlib see: https://github.com/nmslib/hnswlib
Parameters
----------
ef_construction: int (Optional default 200)
This parameter controls the trade-off between index construction
time and index accuracy. Larger values will lead to greater
accuracy but will take longer to construct.
M: int (Optional default 64)
This parameter controls the trade-off between both index size as
well as construction time and accuracy. Larger values will lead to
greater accuracy but will result in a larger index as well as
longer construction time.
For more information on the parameters see:
https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md
"""
self._check_hnswlib_status()
word_vectors = self._get_word_vectors()
vec_dim = word_vectors.shape[1]
num_vecs = word_vectors.shape[0]
index_ids = list(range(0, num_vecs))
self.word_index = hnswlib.Index(space='ip', dim=vec_dim)
self.word_index.init_index(max_elements=num_vecs, ef_construction=ef_construction, M=M)
self.word_index.add_items(word_vectors, index_ids)
self.words_indexed = True
def update_embedding_model_path(self, embedding_model_path):
"""
Update the path of the embedding model to be loaded. The model will
no longer be downloaded but loaded from the path location.
Warning: the model at embedding_model_path must match the
embedding_model parameter type.
Parameters
----------
embedding_model_path: Str
Path to downloaded embedding model.
"""
self.embedding_model_path = embedding_model_path
def change_to_download_embedding_model(self):
"""
Use automatic download to load embedding model used for training.
        Top2Vec will no longer try to load the embedding model from a file
        if an embedding_model path was previously added.
"""
self.embedding_model_path = None
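    # Usage sketch, assuming a local copy of the encoder was downloaded beforehand
    # (the path below is hypothetical):
    #
    #     model.update_embedding_model_path("/models/universal-sentence-encoder_4")
    #     model.change_to_download_embedding_model()  # revert to automatic download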
def get_documents_topics(self, doc_ids, reduced=None):
"""
Get document topics.
The topic of each document will be returned.
The corresponding original topics are returned unless reduced=True,
in which case the reduced topics will be returned.
Parameters
----------
doc_ids: List of str, int
A unique value per document that is used for referring to documents
in search results. If ids were not given to the model, the index of
each document in the model is the id.
        reduced: bool (Optional, default None)
            Original topics are returned by default. If True the
            reduced topics will be returned. If None, the reduced topics are
            used automatically when hierarchical topic reduction has been
            performed.
Returns
-------
topic_nums: array of int, shape(doc_ids)
The topic number of the document corresponding to each doc_id.
topic_score: array of float, shape(doc_ids)
Semantic similarity of document to topic. The cosine similarity of
the document and topic vector.
topics_words: array of shape(num_topics, 50)
For each topic the top 50 words are returned, in order
of semantic similarity to topic.
Example:
[['data', 'deep', 'learning' ... 'artificial'], <Topic 4>
['environment', 'warming', 'climate ... 'temperature'] <Topic 21>
...]
word_scores: array of shape(num_topics, 50)
For each topic the cosine similarity scores of the
top 50 words to the topic are returned.
Example:
[[0.7132, 0.6473, 0.5700 ... 0.3455], <Topic 4>
            [0.7818, 0.7671, 0.7603 ... 0.6769] <Topic 21>
...]
"""
reduced = self._validate_hierarchical_reduction(reduced)
# make sure documents exist
self._validate_doc_ids(doc_ids, doc_ids_neg=[])
# get document indexes from ids
doc_indexes = self._get_document_indexes(doc_ids)
if reduced:
doc_topics = self.doc_top_reduced[doc_indexes]
doc_dist = self.doc_dist_reduced[doc_indexes]
topic_words = self.topic_words_reduced[doc_topics]
topic_word_scores = self.topic_word_scores_reduced[doc_topics]
else:
doc_topics = self.doc_top[doc_indexes]
doc_dist = self.doc_dist[doc_indexes]
topic_words = self.topic_words[doc_topics]
topic_word_scores = self.topic_word_scores[doc_topics]
return doc_topics, doc_dist, topic_words, topic_word_scores
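    # Usage sketch, assuming document ids 0 and 5 exist in the model:
    #
    #     topic_nums, topic_scores, topic_words, word_scores = model.get_documents_topics([0, 5])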
def add_documents(self, documents, doc_ids=None):
"""
Update the model with new documents.
The documents will be added to the current model without changing
existing document, word and topic vectors. Topic sizes will be updated.
If adding a large quantity of documents relative to the current model
size, or documents containing a largely new vocabulary, a new model
should be trained for best results.
Parameters
----------
documents: List of str
doc_ids: List of str, int (Optional)
Only required when doc_ids were given to the original model.
A unique value per document that will be used for referring to
documents in search results.
"""
# add documents
self._validate_documents(documents)
if self.documents is not None:
self.documents = np.append(self.documents, documents)
# add document ids
if self.document_ids_provided is True:
self._validate_document_ids_add_doc(documents, doc_ids)
doc_ids_len = len(self.document_ids)
self.document_ids = np.append(self.document_ids, doc_ids)
self.doc_id2index.update(dict(zip(doc_ids, list(range(doc_ids_len, doc_ids_len + len(doc_ids))))))
elif doc_ids is None:
num_docs = len(documents)
start_id = max(self.document_ids) + 1
doc_ids = list(range(start_id, start_id + num_docs))
doc_ids_len = len(self.document_ids)
self.document_ids = np.append(self.document_ids, doc_ids)
self.doc_id2index.update(dict(zip(doc_ids, list(range(doc_ids_len, doc_ids_len + len(doc_ids))))))
else:
raise ValueError("doc_ids cannot be used because they were not provided to model during training.")
# get document vectors
docs_processed = [self._tokenizer(doc) for doc in documents]
if self.embedding_model == "doc2vec":
document_vectors = np.vstack([self.model.infer_vector(doc_words=doc,
alpha=0.025,
min_alpha=0.01,
epochs=100) for doc in docs_processed])
num_docs = len(documents)
self.model.docvecs.count += num_docs
self.model.docvecs.max_rawint += num_docs
self.model.docvecs.vectors_docs_norm = None
self._set_document_vectors(np.vstack([self._get_document_vectors(norm=False), document_vectors]))
self.model.docvecs.init_sims()
else:
docs_training = [' '.join(doc) for doc in docs_processed]
document_vectors = self._embed_documents(docs_training)
self._set_document_vectors(np.vstack([self._get_document_vectors(), document_vectors]))
# update index
if self.documents_indexed:
# update capacity of index
            current_max = self.document_index.get_max_elements()
            updated_max = current_max + len(documents)
            self.document_index.resize_index(updated_max)
# update index_id and doc_ids
start_index_id = max(self.index_id2doc_id.keys()) + 1
new_index_ids = list(range(start_index_id, start_index_id + len(doc_ids)))
self.index_id2doc_id.update(dict(zip(new_index_ids, doc_ids)))
self.doc_id2index_id.update(dict(zip(doc_ids, new_index_ids)))
            self.document_index.add_items(document_vectors, new_index_ids)
# update topics
self._assign_documents_to_topic(document_vectors, hierarchy=False)
if self.hierarchy is not None:
self._assign_documents_to_topic(document_vectors, hierarchy=True)
def delete_documents(self, doc_ids):
"""
Delete documents from current model.
Warning: If document ids were not used in original model, deleting
documents will change the indexes and therefore doc_ids.
The documents will be deleted from the current model without changing
existing document, word and topic vectors. Topic sizes will be updated.
If deleting a large quantity of documents relative to the current model
size a new model should be trained for best results.
Parameters
----------
doc_ids: List of str, int
A unique value per document that is used for referring to documents
in search results.
"""
# make sure documents exist
self._validate_doc_ids(doc_ids, doc_ids_neg=[])
# update index
if self.documents_indexed:
# delete doc_ids from index
            index_ids = [self.doc_id2index_id[doc_id] for doc_id in doc_ids]
for index_id in index_ids:
self.document_index.mark_deleted(index_id)
# update index_id and doc_ids
for doc_id in doc_ids:
self.doc_id2index_id.pop(doc_id)
for index_id in index_ids:
self.index_id2doc_id.pop(index_id)
# get document indexes from ids
doc_indexes = self._get_document_indexes(doc_ids)
# delete documents
if self.documents is not None:
self.documents = np.delete(self.documents, doc_indexes, 0)
# delete document ids
if self.document_ids is not None:
for doc_id in doc_ids:
self.doc_id2index.pop(doc_id)
keys = list(self.doc_id2index.keys())
self.document_ids = np.array(keys)
values = list(range(0, len(self.doc_id2index.values())))
self.doc_id2index = dict(zip(keys, values))
# delete document vectors
self._set_document_vectors(np.delete(self._get_document_vectors(norm=False), doc_indexes, 0))
if self.embedding_model == 'doc2vec':
num_docs = len(doc_indexes)
self.model.docvecs.count -= num_docs
self.model.docvecs.max_rawint -= num_docs
self.model.docvecs.vectors_docs_norm = None
self.model.docvecs.init_sims()
# update topics
self._unassign_documents_from_topic(doc_indexes, hierarchy=False)
if self.hierarchy is not None:
self._unassign_documents_from_topic(doc_indexes, hierarchy=True)
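    # Usage sketch, assuming the model was trained with string document ids
    # (the id below is hypothetical):
    #
    #     model.add_documents(["some new document text"], doc_ids=["new-doc-1"])
    #     model.delete_documents(doc_ids=["new-doc-1"])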
def get_num_topics(self, reduced=None):
"""
Get number of topics.
This is the number of topics Top2Vec has found in the data by default.
If reduced is True, the number of reduced topics is returned.
Parameters
----------
        reduced: bool (Optional, default None)
            The number of original topics will be returned by default. If True
            the number of reduced topics will be returned, provided hierarchical
            topic reduction has been performed. If None, the reduced topics are
            used automatically when the reduction has been performed.
Returns
-------
num_topics: int
"""
reduced = self._validate_hierarchical_reduction(reduced)
if reduced:
return len(self.topic_vectors_reduced)
else:
return len(self.topic_vectors)
def get_topic_sizes(self, reduced=None):
"""
Get topic sizes.
The number of documents most similar to each topic. Topics are
        in decreasing order of size.
The sizes of the original topics is returned unless reduced=True,
in which case the sizes of the reduced topics will be returned.
Parameters
----------
        reduced: bool (Optional, default None)
            Original topic sizes are returned by default. If True the
            reduced topic sizes will be returned. If None, the reduced topic
            sizes are used automatically when hierarchical topic reduction has
            been performed.
Returns
-------
topic_sizes: array of int, shape(num_topics)
The number of documents most similar to the topic.
topic_nums: array of int, shape(num_topics)
The unique number of every topic will be returned.
"""
reduced = self._validate_hierarchical_reduction(reduced)
if reduced:
return np.array(self.topic_sizes_reduced.values), np.array(self.topic_sizes_reduced.index)
else:
return np.array(self.topic_sizes.values), np.array(self.topic_sizes.index)
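    # Usage sketch:
    #
    #     topic_sizes, topic_nums = model.get_topic_sizes()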
def get_topics(self, num_topics=None, reduced=None):
"""
Get topics, ordered by decreasing size. All topics are returned
if num_topics is not specified.
The original topics found are returned unless reduced=True,
in which case reduced topics will be returned.
Each topic will consist of the top 50 semantically similar words
to the topic. These are the 50 words closest to topic vector
along with cosine similarity of each word from vector. The
higher the score the more relevant the word is to the topic.
Parameters
----------
num_topics: int, (Optional)
Number of topics to return.
        reduced: bool (Optional, default None)
            Original topics are returned by default. If True the
            reduced topics will be returned. If None, the reduced topics are
            used automatically when hierarchical topic reduction has been
            performed.
Returns
-------
topics_words: array of shape(num_topics, 50)
For each topic the top 50 words are returned, in order
of semantic similarity to topic.
Example:
[['data', 'deep', 'learning' ... 'artificial'], <Topic 0>
['environment', 'warming', 'climate ... 'temperature'] <Topic 1>
...]
word_scores: array of shape(num_topics, 50)
For each topic the cosine similarity scores of the
top 50 words to the topic are returned.
Example:
[[0.7132, 0.6473, 0.5700 ... 0.3455], <Topic 0>
            [0.7818, 0.7671, 0.7603 ... 0.6769] <Topic 1>
...]
topic_nums: array of int, shape(num_topics)
The unique number of every topic will be returned.
"""
reduced = self._validate_hierarchical_reduction(reduced)
if reduced:
if num_topics is None:
num_topics = len(self.topic_vectors_reduced)
else:
self._validate_num_topics(num_topics, reduced)
return self.topic_words_reduced[0:num_topics], self.topic_word_scores_reduced[0:num_topics], np.array(
range(0, num_topics))
else:
if num_topics is None:
num_topics = len(self.topic_vectors)
else:
self._validate_num_topics(num_topics, reduced)
return self.topic_words[0:num_topics], self.topic_word_scores[0:num_topics], np.array(range(0, num_topics))
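    # Usage sketch: retrieve the top words of the five largest topics.
    #
    #     topic_words, word_scores, topic_nums = model.get_topics(5)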
def get_topic_hierarchy(self):
"""
Get the hierarchy of reduced topics. The mapping of each original topic
to the reduced topics is returned.
Hierarchical topic reduction must be performed before calling this
method.
Returns
-------
hierarchy: list of ints
Each index of the hierarchy corresponds to the topic number of a
reduced topic. For each reduced topic the topic numbers of the
original topics that were merged to create it are listed.
Example:
[[3] <Reduced Topic 0> contains original Topic 3
[2,4] <Reduced Topic 1> contains original Topics 2 and 4
            [0,1] <Reduced Topic 2> contains original Topics 0 and 1
...]
"""
        self._validate_hierarchical_reduction(reduced=True)
return self.hierarchy
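    # Usage sketch, assuming the model found more than 10 topics:
    #
    #     hierarchy = model.hierarchical_topic_reduction(num_topics=10)
    #     hierarchy = model.get_topic_hierarchy()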
def hierarchical_topic_reduction(self, num_topics):
"""
Reduce the number of topics discovered by Top2Vec.
The most representative topics of the corpus will be found, by
iteratively merging each smallest topic to the most similar topic until
num_topics is reached.
Parameters
----------
num_topics: int
The number of topics to reduce to.
Returns
-------
hierarchy: list of ints
Each index of hierarchy corresponds to the reduced topics, for each
reduced topic the indexes of the original topics that were merged
to create it are listed.
Example:
[[3] <Reduced Topic 0> contains original Topic 3
[2,4] <Reduced Topic 1> contains original Topics 2 and 4
            [0,1] <Reduced Topic 2> contains original Topics 0 and 1
...]
"""
self._validate_hierarchical_reduction_num_topics(num_topics)
num_topics_current = self.topic_vectors.shape[0]
top_vecs = self.topic_vectors
top_sizes = [self.topic_sizes[i] for i in range(0, len(self.topic_sizes))]
hierarchy = [[i] for i in range(self.topic_vectors.shape[0])]
count = 0
interval = max(int(self._get_document_vectors().shape[0] / 50000), 1)
while num_topics_current > num_topics:
# find smallest and most similar topics
smallest = np.argmin(top_sizes)
res = np.inner(top_vecs[smallest], top_vecs)
sims = np.flip(np.argsort(res))
most_sim = sims[1]
if most_sim == smallest:
most_sim = sims[0]
# calculate combined topic vector
top_vec_smallest = top_vecs[smallest]
smallest_size = top_sizes[smallest]
top_vec_most_sim = top_vecs[most_sim]
most_sim_size = top_sizes[most_sim]
combined_vec = self._l2_normalize(((top_vec_smallest * smallest_size) +
(top_vec_most_sim * most_sim_size)) / (smallest_size + most_sim_size))
# update topic vectors
ix_keep = list(range(len(top_vecs)))
ix_keep.remove(smallest)
ix_keep.remove(most_sim)
top_vecs = top_vecs[ix_keep]
top_vecs = np.vstack([top_vecs, combined_vec])
num_topics_current = top_vecs.shape[0]
# update topics sizes
if count % interval == 0:
doc_top = self._calculate_documents_topic(topic_vectors=top_vecs,
document_vectors=self._get_document_vectors(),
dist=False)
                topic_sizes = pd.Series(doc_top)
import pandas as pd
from fastai.tabular.all import *
from fastai.tabular.data import *
from functools import reduce
from tqdm import tqdm, trange
learn = load_learner('monster_model_10batches.pkl')
df = pd.read_csv('../public_data/train.csv')
test = pd.read_csv('../public_data/test.csv')
build_owner = pd.read_csv('../public_data/building_ownership.csv')
build_struct = pd.read_csv('../public_data/building_structure.csv')
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 3 01:08:17 2017
@author: Yuki
"""
import sys
import inspect
import pandas as pd
from PyQt5.QtCore import pyqtSignal,QObject
from PyQt5.QtWidgets import QApplication,QWidget,QFileDialog
from jupyterhack.MyGraph import MyGraphWindow
def transformMyTree(mytree,parent=None):
    # convert a MyTree into a MyTreeRaw (needed for pickling)
raw=MyTreeRaw(parent=parent,name=mytree.name)
for key,value in mytree.getChildren().items():
if isinstance(value,MyTree):
raw[key]=transformMyTree(value,raw)
else:
raw[key]=value
return raw
def transformMyRootTree(mytree,parent=None):
    # convert a MyTree into a MyTreeRaw (needed for pickling)
raw=MyTreeRaw(parent=parent,name=mytree.name)
for key,value in mytree.getChildren().items():
if isinstance(value,MyTree):
raw[key]=transformMyTree(value,raw)
else:
raw[key]=value
return raw
def transformMyTreeRaw(tree,parent=None):
    # convert a MyTreeRaw back into a MyTree (for unpickling)
result=MyTree(parent=parent,name=tree.name)
for key,value in tree.items():
if isinstance(value,MyTreeRaw):
result.add(ref=transformMyTreeRaw(value,result),label=key)
else:
result.add(ref=value,label=key)
return result
def transformMyRootTreeRaw(tree,current_path,dependencies):
    # convert a MyTreeRaw back into a MyRootTree (for unpickling)
try:
result=MyRootTree(name=tree.name)
for key,value in tree.items():
if isinstance(value,MyTreeRaw):
result.add(ref=transformMyTreeRaw(value,result),label=key)
else:
result.add(ref=value,label=key)
result.setCurrent(current_path)
return result
except Exception as e:
print(e)
print(dependencies)
        raise Exception('Cannot unpickle the file. You may be using a different environment from the one used when pickling. Use an environment that satisfies the above requirements')
class MyTreeRaw(dict):
    '''A dict-based class implementing the tree structure; a MyTree is converted into this class when it is pickled.'''
def __init__(self,parent,myobject=None,name='temp'):
if myobject==None:
super().__init__({})
else:
super().__init__(myobject)
self.parent=parent #nodeはroot以外必ず親を持つ
self.name=name #子の名前はdict型のkey
#
#class MyRootTreeRaw(dict):
# '''Convert MyRootTree to a dictionary to avoid recursive error when pickling. Also record package dependencies to show them up when the file is opened in a different environment'''
# def __init__(self,parent,myobject=None,name='temp'):
# if myobject==None:
# super().__init__({})
# else:
# super().__init__(myobject)
#        self.parent=parent # every node except the root always has a parent
#        self.name=name # a child's name is its key in the parent dict
# self.dependencies={}
class MyTree(QObject):
'''
A data folder class that has a tree structure. This class's instance holds children as its attributes and you can access them '.childname'.
Note that you must reimplement getChildren method when you add a new attribute to this class for the above reason.
'''
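    # Usage sketch (pandas is imported as pd at the top of this module):
    #
    #     root = MyTree(name='root')
    #     root.add(pd.Series([1, 2, 3]), label='data')
    #     root.data        # children are reachable as attributes
    #     root.rename('data', 'series')
    #     root.show()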
SPACE=' '
INDENT='--'
    addSignal=pyqtSignal(list,str,list) # emitted as (path, label, ref); the last is wrapped as [ref] so a reference can be passed
deleSignal=pyqtSignal(list,str)
renameSignal=pyqtSignal(list,str,str)
def __init__(self,name='temp',parent=None,children=None):
        # children is given as a dict of {'name': reference}
super().__init__()
self.parent=parent #nodeはroot以外必ず親を持つ
self.name=name
if not children==None:
for key,item in children.items():
self.__dict__[key]=item
def __reduce_ex__(self, proto):
        # needed for pickling: a class that adds instance attributes dynamically cannot be pickled as-is
return transformMyTreeRaw,(transformMyTree(self),)
def __str__(self, level=0,current=None,unfold=True):
if self is current:
ret = self.SPACE*level+self.INDENT+self.name+'<=='+"\n"
else:
ret = self.SPACE*level+self.INDENT+self.name+"\n"
        # expand MyTree children first; for other data, print the key when unfold is True
for key in sorted([key for key,value in self.getChildren().items() if isinstance(value,MyTree)]):
ret += self.__dict__[key].__str__(level=level+1,current=current,unfold=unfold)
if unfold:
for key in sorted([key for key,value in self.getChildren().items() if not isinstance(value,MyTree)]):
ret += self.SPACE*(level+1)+repr(key)+"\n"
return ret
def show(self,unfold=True):
sys.stdout.write(self.__str__(unfold=unfold))
def get(self,label):
return self.__dict__[label]
def getChildren(self):
        # return the children as {'name': reference}
children={k:v for k,v in self.__dict__.items() if not (k=='parent' or k=='name')}
return children
def add(self,ref,label=None,check=False,signal=True):
        # add ref as a child unless a child with the same label already exists; the check option
        # verifies that the same object is not registered under a different name
        if label==None: # if no label is given, use the argument name at the call site as the label
frame = inspect.currentframe()
stack = inspect.getouterframes(frame)
            val_name = stack[1].code_context[0].split('(')[1].split(')')[0] # apparently this retrieves the argument name at the call site; however, calling add repeatedly inside a function or a Jupyter cell reuses the last call's argument, which raises a duplicate-label error
label=val_name
if label in self.__dict__.keys():
raise Exception('The same name already exists.Or you should call \'add\' method like this \'add(refference,\"name\")\' .')
if check:
result=self.checkChildren(ref)
if result[0]:
raise Exception('same object is registered as '+result[1])
else:
                # for a folder, set the parent-child relation and make the folder name match the label;
                # connect addSignal to transmitAddSignal so the signal propagates up to the parent
if isinstance(ref,MyTree):
ref.name=label
ref.parent=self
ref.addSignal.connect(self.transmitAddSignal)
ref.deleSignal.connect(self.transmitDeleSignal)
ref.renameSignal.connect(self.transmitRenameSignal)
self.__dict__[label]=ref
else:
self.__dict__[label]=ref
else:
if isinstance(ref,MyTree):
ref.name=label
ref.parent=self
ref.addSignal.connect(self.transmitAddSignal)
ref.deleSignal.connect(self.transmitDeleSignal)
ref.renameSignal.connect(self.transmitRenameSignal)
self.__dict__[label]=ref
else:
self.__dict__[label]=ref
#signalをemit
if signal:
self.addSignal.emit([self.name],label,[ref])
def pop(self,label,signal=True):
target=self.__dict__.pop(label)
if isinstance(target,MyTree):
target.disconnect()
if signal:
self.deleSignal.emit([self.name],label)
return target
def plot(self,x,y,xlabel='X',ylabel='Y',title='No name'):
g=MyGraphWindow()
g.plot(x,y)
ax=g.fig.get_axes()[0]
ax.set_title(title)
g.setWindowTitle(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
def try_add(graph,label):
self.add(graph,label)
try:
try_add(g,title)
except:
suffix=0
while True:
try:
try_add(g,title+str(suffix))
break
except:
suffix+=1
def rename(self,before,after,signal=True):
        if (not before==after) and (not after in self.getChildren().keys()): # when before and after differ and after is not already a child
ref=self.get(before)
            self.pop(before,signal=False) # note: swapping pop and add changes the behaviour; both handle the same object, but pop disconnects its signals
self.add(ref,label=after,signal=False)
if signal:
self.renameSignal.emit([self.name],before,after)
return True
else:
return False
def checkChildren(self,ref):
        # check whether ref is already held as a child; ref: an object reference
result=[False,None]
for key,child in self.getChildren().items():
if id(child)==id(ref):
result[0]=True
result[1]=key
break
return result
def ascend(self):
#親を遡って一番上からのfull_pathを返す
start=self
full_path=[start.name]
while not start.parent==None:
start=start.parent
full_path.append(start.name)
full_path.reverse()
return full_path
def search(self,target):
        # search used for changing the current directory, so only directories are examined; target: a string
if self.name==target:
return {'result':True,'path':[self.name]}
for child in self.getChildren().values():
if isinstance(child,MyTree): #ディレクトリだけ調べる
answer=child.search(target)
if answer['result']:
return {'result':True,'path':([self.name]+answer['path'])}
        # reaching this point means the search found no match
return {'result':False,'path':None}
def runAll(self):
#tree内を全ての参照のlistを返す
mylist=[]
mylist.append(self)
for child in self.getChildren().values():
if isinstance(child,MyTree):
mylist=mylist+child.runAll()
else:
mylist.append(child)
return mylist
def loadFiles(self):
def load_a_file(path):
if not path=='':
                # read a single row first; if it contains strings, use it as the header and register under those names at root, otherwise register as 'data0', 'data1', ...
reader=pd.read_csv(path,sep='\t',comment='#',header=None,chunksize=1)
data=reader.get_chunk(1)
ndata=None
if type(data.ix[0,0])==str:
ndata=pd.read_csv(path,sep='\t',comment='#')
else:
                    ndata=pd.read_csv(path,sep='\t',comment='#',header=None)
"""
@author: hugonnet
derive all values present in the text of the manuscript: accelerations, SLR contributions, etc..
"""
import os, sys
import numpy as np
import pandas as pd
from glob import glob
import pyddem.fit_tools as ft
import pyddem.tdem_tools as tt
reg_dir = '/home/atom/ongoing/work_worldwide/vol/final'
fn_tarea = '/home/atom/data/inventory_products/RGI/tarea_zemp.csv'
list_fn_reg= [os.path.join(reg_dir,'dh_'+str(i).zfill(2)+'_rgi60_int_base_reg.csv') for i in [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19]]
periods = ['2000-01-01_2005-01-01','2005-01-01_2010-01-01','2010-01-01_2015-01-01','2015-01-01_2020-01-01','2000-01-01_2020-01-01']
tlims = [(np.datetime64('2000-01-01'),np.datetime64('2005-01-01')),(np.datetime64('2005-01-01'),np.datetime64('2010-01-01')),(np.datetime64('2010-01-01'),np.datetime64('2015-01-01')),(np.datetime64('2015-01-01'),np.datetime64('2020-01-01')),(np.datetime64('2000-01-01'),np.datetime64('2020-01-01'))]
list_df = []
for fn_reg in list_fn_reg:
for period in periods:
df_tmp = tt.aggregate_all_to_period(pd.read_csv(fn_reg),[tlims[periods.index(period)]],fn_tarea=fn_tarea,frac_area=1)
list_df.append(df_tmp)
df = pd.concat(list_df)
list_df_all = []
for period in periods:
df_p = df[df.period == period]
df_global = tt.aggregate_indep_regions_rates(df_p)
df_global['reg']='global'
df_global['period'] = period
df_noperiph = tt.aggregate_indep_regions_rates(df_p[~df_p.reg.isin([5, 19])])
df_noperiph['reg']='global_noperiph'
df_noperiph['period'] =period
df_full_p = pd.concat([df_p,df_noperiph,df_global])
list_df_all.append(df_full_p)
df_all = pd.concat(list_df_all)
df_g = df_all[df_all.reg=='global']
df_np = df_all[df_all.reg=='global_noperiph']
#CONTRIBUTION TO SLR
# from <NAME>: AVISO-based sea-level rise trend for 2000.0-2020.0 and 1-sigma errors
gmsl_trend = 3.56
gmsl_trend_err = 0.2
gmsl_acc = 0.15
gmsl_acc_err = 0.04
glac_trend = df_g[df_g.period == '2000-01-01_2020-01-01'].dmdt.values[0]/361.8
glac_trend_err = df_g[df_g.period == '2000-01-01_2020-01-01'].err_dmdt.values[0]/361.8
print('Glacier mass loss totalled '+'{:.2f}'.format(df_g[df_g.period == '2000-01-01_2020-01-01'].dmdt.values[0])+' ± '+'{:.2f}'.format(2*df_g[df_g.period == '2000-01-01_2020-01-01'].err_dmdt.values[0])+ ' Gt yr-1')
print('Glacier mass loss totalled '+'{:.3f}'.format(glac_trend)+' ± '+'{:.3f}'.format(2*glac_trend_err)+ ' mm of sea-level rise')
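# the relative 1-sigma uncertainty of the ratio below is propagated in quadrature:
# sigma_contr/contr = sqrt((sigma_gmsl/gmsl)**2 + (sigma_glac/glac)**2)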
contr_trend = -glac_trend/gmsl_trend*100
contr_trend_err = -glac_trend/gmsl_trend*np.sqrt((gmsl_trend_err/gmsl_trend)**2+(glac_trend_err/glac_trend)**2)*100
print('Glacier contribution to SLR is '+'{:.2f}'.format(contr_trend)+' % ± '+'{:.2f}'.format(2*contr_trend_err)+' %')
#GLACIER ACCELERATION
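# the acceleration is estimated as the slope of a weighted least-squares fit of the four
# 5-yearly mean rates (the 20-year value is excluded) against time offsets x = 0, 5, 10, 15
# years, weighted by 1/error**2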
beta1_t, beta0, incert_slope, _, _ = ft.wls_matrix(x=np.arange(0,16,5),y=df_g.dhdt.values[:-1],w=1/df_g.err_dhdt.values[:-1]**2)
print('Global thinning acceleration is '+'{:.5f}'.format(beta1_t)+' ± '+'{:.5f}'.format(2*incert_slope)+ ' m yr-2')
beta1, beta0, incert_slope, _, _ = ft.wls_matrix(x=np.array([0,5,10,15]),y=df_np.dhdt.values[:-1],w=1/df_np.err_dhdt.values[:-1]**2)
print('Global excl. GRL and ANT thinning acceleration is '+'{:.5f}'.format(beta1)+' ± '+'{:.5f}'.format(2*incert_slope)+ ' m yr-2')
beta1_g, beta0, incert_slope_g, _, _ = ft.wls_matrix(x=np.arange(0,16,5),y=df_g.dmdt.values[:-1],w=1/df_g.err_dmdt.values[:-1]**2)
print('Global mass loss acceleration is '+'{:.5f}'.format(beta1_g)+' ± '+'{:.5f}'.format(2*incert_slope_g)+ ' Gt yr-2')
beta1, beta0, incert_slope, _, _ = ft.wls_matrix(x=np.array([0,5,10,15]),y=df_np.dmdt.values[:-1],w=1/df_np.err_dmdt.values[:-1]**2)
print('Global excl. GRL and ANT mass loss acceleration is '+'{:.5f}'.format(beta1)+' ± '+'{:.5f}'.format(2*incert_slope)+ ' Gt yr-2')
#CONTRIBUTION TO ACCELERATION OF SLR
glac_acc = -beta1_g/361.8
glac_acc_err = incert_slope_g/361.8
contr_acc = glac_acc/gmsl_acc*100
# error is not symmetrical, error of acceleration of SLR is 20 times larger than glacier error
rss_gmsl_acc_err = np.sqrt(glac_acc_err**2+gmsl_acc_err**2)
upper_bound = glac_acc/(gmsl_acc-2*rss_gmsl_acc_err)*100
lower_bound = glac_acc/(gmsl_acc+2*rss_gmsl_acc_err)*100
print('Glacier contribution to acceleration of SLR is '+'{:.2f}'.format(contr_acc)+' % with 95% confidence interval of '+'{:.1f}'.format(lower_bound)+'-'+'{:.1f}'.format(upper_bound)+' %')
#YEARLY VALUES
periods = ['20'+str(i).zfill(2)+'-01-01_'+'20'+str(i+1).zfill(2)+'-01-01' for i in np.arange(0,20,1)]
tlims = [(np.datetime64('20'+str(i).zfill(2)+'-01-01'),np.datetime64('20'+str(i+1).zfill(2)+'-01-01')) for i in np.arange(0,20,1)]
list_df_yrly = []
for fn_reg in list_fn_reg:
for period in periods:
df_tmp = tt.aggregate_all_to_period(pd.read_csv(fn_reg),[tlims[periods.index(period)]],fn_tarea=fn_tarea,frac_area=1)
list_df_yrly.append(df_tmp)
df_yrly = pd.concat(list_df_yrly)
list_df_all_yrly = []
for period in periods:
df_p = df_yrly[df_yrly.period == period]
df_global = tt.aggregate_indep_regions_rates(df_p)
df_global['reg']='global'
df_global['period'] = period
df_noperiph = tt.aggregate_indep_regions_rates(df_p[~df_p.reg.isin([5, 19])])
df_noperiph['reg']='global_noperiph'
df_noperiph['period'] =period
df_full_p = pd.concat([df_p,df_noperiph,df_global])
list_df_all_yrly.append(df_full_p)
df_all_yrly = pd.concat(list_df_all_yrly)
dhdt_2000_global = df_all_yrly[np.logical_and(df_all_yrly.period=='2000-01-01_2001-01-01',df_all_yrly.reg=='global_noperiph')].dhdt.values[0]
dhdt_2000_global_err = df_all_yrly[np.logical_and(df_all_yrly.period=='2000-01-01_2001-01-01',df_all_yrly.reg=='global_noperiph')].err_dhdt.values[0]
dhdt_2019_global = df_all_yrly[np.logical_and(df_all_yrly.period=='2019-01-01_2020-01-01',df_all_yrly.reg=='global_noperiph')].dhdt.values[0]
dhdt_2019_global_err = df_all_yrly[np.logical_and(df_all_yrly.period=='2019-01-01_2020-01-01',df_all_yrly.reg=='global_noperiph')].err_dhdt.values[0]
print('Global excl. GRL and ANT thinning rates in 2000: '+'{:.3f}'.format(dhdt_2000_global)+' ± '+'{:.3f}'.format(2*dhdt_2000_global_err)+' m yr-1')
print('Global excl. GRL and ANT thinning rates in 2019: '+'{:.3f}'.format(dhdt_2019_global)+' ± '+'{:.3f}'.format(2*dhdt_2019_global_err)+' m yr-1')
# REGIONAL PERCENTAGES
df_tot = df_all[df_all.period == '2000-01-01_2020-01-01']
list_cont_perc = []
for i in range(19):
cont = df_tot[df_tot.reg==i+1].dmdt.values[0]/df_tot[df_tot.reg=='global'].dmdt.values[0]*100
list_cont_perc.append(cont)
print('Contribution of Alaska: '+'{:.1f}'.format(list_cont_perc[0])+' %')
print('Contribution of Greenland Periphery: '+'{:.1f}'.format(list_cont_perc[4])+' %')
print('Contribution of Arctic Canada North: '+'{:.1f}'.format(list_cont_perc[2])+' %')
print('Contribution of Arctic Canada South: '+'{:.1f}'.format(list_cont_perc[3])+' %')
print('Contribution of Antarctic Periphery: '+'{:.1f}'.format(list_cont_perc[18])+' %')
print('Contribution of High Moutain Asia: '+'{:.1f}'.format(list_cont_perc[12]+list_cont_perc[13]+list_cont_perc[14])+' %')
print('Contribution of Southern Andes: '+'{:.1f}'.format(list_cont_perc[16])+' %')
#separate contribution from North Greenland and South: done manually
print('Iceland specific rate: '+'{:.2f}'.format(df_tot[df_tot.reg==6].dmdtda.values[0])+' ± '+'{:.2f}'.format(2*df_tot[df_tot.reg==6].err_dmdtda.values[0])+' m w.e yr-1')
df_nonpolar = tt.aggregate_indep_regions_rates(df_tot[df_tot.reg.isin([10, 11, 12, 16, 17, 18])])
print('Non-polar specific rate: '+'{:.2f}'.format(df_nonpolar.dmdtda.values[0])+' ± '+'{:.2f}'.format(2*df_nonpolar.err_dmdtda.values[0])+' m w.e yr-1')
#for HMA, account for correlated error all at once:
fn_hma=os.path.join(reg_dir,'dh_13_14_15_rgi60_int_base_reg.csv')
df_hma = tt.aggregate_all_to_period(pd.read_csv(fn_hma),[(np.datetime64('2000-01-01'),np.datetime64('2020-01-01'))],fn_tarea=fn_tarea,frac_area=1)
print('HMA specific rate: '+'{:.2f}'.format(df_hma.dmdtda.values[0])+' ± '+'{:.2f}'.format(2*df_hma.err_dmdtda.values[0])+' m w.e yr-1')
print('Antarctic and Subantarctic specific rate: '+'{:.2f}'.format(df_tot[df_tot.reg==19].dmdtda.values[0])+' ± '+'{:.2f}'.format(2*df_tot[df_tot.reg==19].err_dmdtda.values[0])+' m w.e yr-1')
#corresponding period for comparison to Shean et al., 2019
# the remaining arguments of this call are assumed: mirroring the aggregation above, with the
# 2000-2018 period of Shean et al. (2019) substituted
df_hma = tt.aggregate_all_to_period(pd.read_csv(fn_hma),[(np.datetime64('2000-01-01'),np.datetime64('2018-01-01'))],fn_tarea=fn_tarea,frac_area=1)
import pandas as pd
import numpy as np
import datetime
import string
def combine_google_big_query_results(fnames):
dataframes = []
for fname in fnames:
df_ = pd.read_csv(fname)
dataframes.append(df_)
df = pd.concat(dataframes)
df.to_csv("../data/combined_df.csv")
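# usage sketch (the file names below are hypothetical):
# combine_google_big_query_results(["../data/results_part1.csv", "../data/results_part2.csv"])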
def augment_df():
    df = pd.read_csv("../data/combined_df.csv")
from datetime import datetime, timedelta
from typing import Any
import weakref
import numpy as np
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._libs.tslibs import frequencies as libfrequencies, resolution
from pandas._libs.tslibs.parsing import parse_time_string
from pandas._libs.tslibs.period import Period
from pandas._typing import DtypeObj, Label
from pandas.util._decorators import Appender, cache_readonly, doc
from pandas.core.dtypes.common import (
ensure_platform_int,
is_bool_dtype,
is_datetime64_any_dtype,
is_dtype_equal,
is_float,
is_integer,
is_object_dtype,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.arrays.period import (
PeriodArray,
period_array,
raise_on_incompatible,
validate_dtype_freq,
)
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
InvalidIndexError,
_index_shared_docs,
ensure_index,
maybe_extract_name,
)
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas.core.indexes.datetimes import DatetimeIndex, Index
from pandas.core.indexes.extension import inherit_names
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops import get_op_result_name
from pandas.core.tools.datetimes import DateParseError
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(dict(target_klass="PeriodIndex or list of Periods"))
# --- Period index sketch
def _new_PeriodIndex(cls, **d):
# GH13277 for unpickling
values = d.pop("data")
if values.dtype == "int64":
freq = d.pop("freq", None)
values = PeriodArray(values, freq=freq)
return cls._simple_new(values, **d)
else:
return cls(values, **d)
@inherit_names(
["strftime", "to_timestamp", "asfreq", "start_time", "end_time"]
+ PeriodArray._field_ops,
PeriodArray,
wrap=True,
)
@inherit_names(["is_leap_year", "freq", "_format_native_types"], PeriodArray)
class PeriodIndex(DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in time.
Index keys are boxed to Period objects which carries the metadata (eg,
frequency information).
Parameters
----------
data : array-like (1d int np.ndarray or PeriodArray), optional
Optional period-like data to construct index with.
copy : bool
Make a copy of input ndarray.
freq : str or period object, optional
One of pandas period strings or corresponding objects.
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
tz : object, default None
Timezone for converting datetime64 data to Periods.
dtype : str or PeriodDtype, default None
Attributes
----------
day
dayofweek
dayofyear
days_in_month
daysinmonth
end_time
freq
freqstr
hour
is_leap_year
minute
month
quarter
qyear
second
start_time
week
weekday
weekofyear
year
Methods
-------
asfreq
strftime
to_timestamp
See Also
--------
Index : The base pandas Index type.
Period : Represents a period of time.
DatetimeIndex : Index with datetime64 data.
TimedeltaIndex : Index of timedelta64 data.
period_range : Create a fixed-frequency PeriodIndex.
Examples
--------
    >>> idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3])
"""
_typ = "periodindex"
_attributes = ["name", "freq"]
# define my properties & methods for delegation
_is_numeric_dtype = False
_infer_as_myclass = True
_data: PeriodArray
freq: DateOffset
_engine_type = libindex.PeriodEngine
_supports_partial_string_indexing = True
# ------------------------------------------------------------------------
# Index Constructors
def __new__(
cls,
data=None,
ordinal=None,
freq=None,
tz=None,
dtype=None,
copy=False,
name=None,
**fields,
):
valid_field_set = {
"year",
"month",
"day",
"quarter",
"hour",
"minute",
"second",
}
if not set(fields).issubset(valid_field_set):
argument = list(set(fields) - valid_field_set)[0]
raise TypeError(f"__new__() got an unexpected keyword argument {argument}")
name = maybe_extract_name(name, data, cls)
if data is None and ordinal is None:
# range-based.
data, freq2 = PeriodArray._generate_range(None, None, None, freq, fields)
            # PeriodArray._generate_range does validation that fields is
            # empty when really using the range-based constructor.
freq = freq2
data = PeriodArray(data, freq=freq)
else:
freq = validate_dtype_freq(dtype, freq)
# PeriodIndex allow PeriodIndex(period_index, freq=different)
# Let's not encourage that kind of behavior in PeriodArray.
if freq and isinstance(data, cls) and data.freq != freq:
# TODO: We can do some of these with no-copy / coercion?
# e.g. D -> 2D seems to be OK
data = data.asfreq(freq)
if data is None and ordinal is not None:
# we strangely ignore `ordinal` if data is passed.
ordinal = np.asarray(ordinal, dtype=np.int64)
data = PeriodArray(ordinal, freq)
else:
# don't pass copy here, since we copy later.
data = period_array(data=data, freq=freq)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
@classmethod
def _simple_new(cls, values: PeriodArray, name: Label = None):
"""
Create a new PeriodIndex.
Parameters
----------
values : PeriodArray
Values that can be converted to a PeriodArray without inference
or coercion.
"""
assert isinstance(values, PeriodArray), type(values)
result = object.__new__(cls)
result._data = values
# For groupby perf. See note in indexes/base about _index_data
result._index_data = values._data
result.name = name
result._cache = {}
result._reset_identity()
return result
# ------------------------------------------------------------------------
# Data
@property
def values(self):
return np.asarray(self)
@property
def _has_complex_internals(self):
# used to avoid libreduction code paths, which raise or require conversion
return True
def _shallow_copy(self, values=None, name: Label = no_default):
name = name if name is not no_default else self.name
cache = self._cache.copy() if values is None else {}
if values is None:
values = self._data
result = self._simple_new(values, name=name)
result._cache = cache
return result
def _maybe_convert_timedelta(self, other):
"""
Convert timedelta-like input to an integer multiple of self.freq
Parameters
----------
other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
Returns
-------
converted : int, np.ndarray[int64]
Raises
------
IncompatibleFrequency : if the input cannot be written as a multiple
of self.freq. Note IncompatibleFrequency subclasses ValueError.
"""
if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):
offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(offset, Tick):
# _check_timedeltalike_freq_compat will raise if incompatible
delta = self._data._check_timedeltalike_freq_compat(other)
return delta
elif isinstance(other, DateOffset):
freqstr = other.rule_code
base = libfrequencies.get_base_alias(freqstr)
if base == self.freq.rule_code:
return other.n
raise raise_on_incompatible(self, other)
elif is_integer(other):
# integer is passed to .shift via
# _add_datetimelike_methods basically
# but ufunc may pass integer to _add_delta
return other
# raise when input doesn't have freq
raise raise_on_incompatible(self, None)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
if not isinstance(dtype, PeriodDtype):
return False
return dtype.freq == self.freq
# ------------------------------------------------------------------------
# Rendering Methods
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.astype(object)._values
@property
def _formatter_func(self):
return self.array._formatter(boxed=False)
# ------------------------------------------------------------------------
# Indexing
@cache_readonly
def _engine(self):
# To avoid a reference cycle, pass a weakref of self._values to _engine_type.
period = weakref.ref(self._values)
return self._engine_type(period, len(self))
@doc(Index.__contains__)
def __contains__(self, key: Any) -> bool:
if isinstance(key, Period):
if key.freq != self.freq:
return False
else:
return key.ordinal in self._engine
else:
hash(key)
try:
self.get_loc(key)
return True
except KeyError:
return False
@cache_readonly
def _int64index(self) -> Int64Index:
return Int64Index._simple_new(self.asi8, name=self.name)
# ------------------------------------------------------------------------
# Index Methods
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc. Needs additional handling as
PeriodIndex stores internal data as int dtype
Replace this to __numpy_ufunc__ in future version
"""
if isinstance(context, tuple) and len(context) > 0:
func = context[0]
if func is np.add:
pass
elif func is np.subtract:
name = self.name
left = context[1][0]
right = context[1][1]
if isinstance(left, PeriodIndex) and isinstance(right, PeriodIndex):
name = left.name if left.name == right.name else None
return Index(result, name=name)
elif isinstance(left, Period) or isinstance(right, Period):
return Index(result, name=name)
elif isinstance(func, np.ufunc):
if "M->M" not in func.types:
msg = f"ufunc '{func.__name__}' not supported for the PeriodIndex"
# This should be TypeError, but TypeError cannot be raised
# from here because numpy catches.
raise ValueError(msg)
if is_bool_dtype(result):
return result
# the result is object dtype array of Period
# cannot pass _simple_new as it is
return type(self)(result, freq=self.freq, name=self.name)
def asof_locs(self, where, mask: np.ndarray) -> np.ndarray:
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
where_idx = where
if isinstance(where_idx, DatetimeIndex):
where_idx = PeriodIndex(where_idx._values, freq=self.freq)
elif not isinstance(where_idx, PeriodIndex):
raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex")
elif where_idx.freq != self.freq:
raise raise_on_incompatible(self, where_idx)
locs = self.asi8[mask].searchsorted(where_idx.asi8, side="right")
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where_idx.asi8 < self.asi8[first])] = -1
return result
@doc(Index.astype)
def astype(self, dtype, copy=True, how="start"):
dtype = pandas_dtype(dtype)
if is_datetime64_any_dtype(dtype):
# 'how' is index-specific, isn't part of the EA interface.
tz = getattr(dtype, "tz", None)
return self.to_timestamp(how=how).tz_localize(tz)
# TODO: should probably raise on `how` here, so we don't ignore it.
return super().astype(dtype, copy=copy)
@property
def is_full(self) -> bool:
"""
Returns True if this PeriodIndex is range-like in that all Periods
between start and end are present, in order.
"""
if len(self) == 0:
return True
if not self.is_monotonic:
raise ValueError("Index is not monotonic")
values = self.asi8
return ((values[1:] - values[:-1]) < 2).all()
@property
def inferred_type(self) -> str:
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return "period"
@Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
target = ensure_index(target)
if isinstance(target, PeriodIndex):
if target.freq != self.freq:
# No matches
no_matches = -1 * np.ones(self.shape, dtype=np.intp)
return no_matches
target = target.asi8
self_index = self._int64index
else:
self_index = self
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, target)
if self_index is not self:
# convert tolerance to i8
tolerance = self._maybe_convert_timedelta(tolerance)
return Index.get_indexer(self_index, target, method, limit, tolerance)
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = ensure_index(target)
if not self._is_comparable_dtype(target.dtype):
no_matches = -1 * np.ones(self.shape, dtype=np.intp)
return no_matches, no_matches
target = target.asi8
indexer, missing = self._int64index.get_indexer_non_unique(target)
return ensure_platform_int(indexer), missing
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label.
Parameters
----------
key : Period, NaT, str, or datetime
String or datetime key must be parseable as Period.
Returns
-------
loc : int or ndarray[int64]
Raises
------
KeyError
Key is not present in the index.
TypeError
If key is listlike or otherwise not hashable.
"""
orig_key = key
if not is_scalar(key):
raise InvalidIndexError(key)
if isinstance(key, str):
try:
loc = self._get_string_slice(key)
return loc
except (TypeError, ValueError):
pass
try:
asdt, reso = parse_time_string(key, self.freq)
except DateParseError as err:
# A string with invalid format
raise KeyError(f"Cannot interpret '{key}' as period") from err
grp = resolution.Resolution.get_freq_group(reso)
freqn = resolution.get_freq_group(self.freq)
# _get_string_slice will handle cases where grp < freqn
assert grp >= freqn
if grp == freqn:
key = Period(asdt, freq=self.freq)
loc = self.get_loc(key, method=method, tolerance=tolerance)
return loc
elif method is None:
raise KeyError(key)
else:
key = asdt
elif is_integer(key):
            # Period constructor will cast to string, which we don't want
raise KeyError(key)
try:
key = Period(key, freq=self.freq)
except ValueError as err:
# we cannot construct the Period
raise KeyError(orig_key) from err
try:
return Index.get_loc(self, key, method, tolerance)
except KeyError as err:
raise KeyError(orig_key) from err
def _maybe_cast_slice_bound(self, label, side: str, kind: str):
"""
If label is a string or a datetime, cast it to Period.ordinal according
to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'}
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ["loc", "getitem"]
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, str):
try:
parsed, reso = parse_time_string(label, self.freq)
bounds = self._parsed_string_to_bounds(reso, parsed)
return bounds[0 if side == "left" else 1]
except ValueError as err:
# string cannot be parsed as datetime-like
# TODO: we need tests for this case
raise KeyError(label) from err
elif is_integer(label) or is_float(label):
self._invalid_indexer("slice", label)
return label
def _parsed_string_to_bounds(self, reso: str, parsed: datetime):
if reso not in ["year", "month", "quarter", "day", "hour", "minute", "second"]:
raise KeyError(reso)
grp = resolution.Resolution.get_freq_group(reso)
iv = Period(parsed, freq=(grp, 1))
return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end"))
def _validate_partial_date_slice(self, reso: str):
grp = resolution.Resolution.get_freq_group(reso)
freqn = resolution.get_freq_group(self.freq)
if not grp < freqn:
# TODO: we used to also check for
# reso in ["day", "hour", "minute", "second"]
# why is that check not needed?
raise ValueError
def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):
# TODO: Check for non-True use_lhs/use_rhs
parsed, reso = | parse_time_string(key, self.freq) | pandas._libs.tslibs.parsing.parse_time_string |
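# A short usage sketch of the PeriodIndex behaviour documented above
# (construction, label-based lookup, and astype to datetime64); the results
# noted in the comments are the expected values.
import pandas as pd
pi = pd.period_range("2000-01", periods=4, freq="M")   # monthly PeriodIndex '2000-01' .. '2000-04'
pi.get_loc("2000-02")                                  # 1 - the string is parsed to a monthly Period
pi.get_loc(pd.Period("2000-03", freq="M"))             # 2
pi.astype("datetime64[ns]")                            # start-of-period timestamps via to_timestamp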
import os
from typing import List, Dict
import psycopg2
import psycopg2.extras
import pandas
class Pgdb:
""" psycopg2 wrapper to query postgres database """
def __init__(self, url: str = None, autocommit: bool = False):
"""
:param url: (str)
"""
if url is None:
url = os.environ['DATABASE_URL']
self._connection = psycopg2.connect(url)
self._connection.set_session(autocommit=autocommit)
self._cursor = self._connection.cursor()
def commit(self):
self._connection.commit()
def execute_sql(self, sql):
self._cursor.execute(sql)
def select_table(self, table_name: str) -> List[Dict]:
"""
:param table_name: (str)
:return: (List[Dict])
"""
return self._select_table(self._cursor, table_name)
def insert_df(self,
table_name: str,
df: pandas.DataFrame,
page_size: int = 100,
primary_key: str = "id",
reset_serial: bool = True):
"""
:param table_name: (str)
:param df: (pandas.DataFrame)
:param page_size: (int)
"""
self._insert_df(self._cursor, table_name, df, page_size, primary_key)
def insert_csv(self,
table_name: str,
csv_path: str,
page_size: int = 100,
primary_key: str = "id"):
"""
:param table_name: (str)
:param csv_path: (str)
:param page_size: (int)
"""
self._insert_df(
cursor=self._cursor,
table_name=table_name,
df= | pandas.read_csv(csv_path) | pandas.read_csv |
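# A minimal usage sketch of the Pgdb wrapper above; the connection URL and
# table name are hypothetical, and the private _insert_df/_select_table
# helpers are assumed to behave as their public wrappers suggest.
import pandas as pd
db = Pgdb(url="postgresql://user:password@localhost:5432/mydb", autocommit=False)
df = pd.DataFrame({"id": [1, 2], "name": ["a", "b"]})
db.insert_df(table_name="example_table", df=df, page_size=100, primary_key="id")
db.commit()
rows = db.select_table("example_table")   # List[Dict], one dict per row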
#coding=utf-8
import os
import CSZLData
import CSZLFeatureEngineering as FE
import CSZLModel
import CSZLDisplay
import CSZLUtils
import pandas as pd
import datetime
import time
class CSZLWorkflow(object):
"""各种workflow 主要就是back testing"""
def BackTesting(self):
#Default_folder_path='./temp/'
Default_folder_path='D:/temp2/'
#zzzz=CSZLData.CSZLData("20220101","20220301")
#zzzz.getDataSet_all(Default_folder_path)
#"20150801","20220425"
dayA='20130101'#nomal/small
dayB='20170301'
#dayB='20200101'
#dayA='20150801'#nomal/small
#dayB='20220425'
dayC='20170301'
dayD='20220425'
dayA='20150801'#nomal/small
dayB='20220425'
dayC='20220201'
dayD='20220513'
#dayA='20190101'#nomal/small
#dayB='20190601'
#dayC='20210101'
#dayD='20220425'
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FE05()
#zzzz=FE.CSZLFeatureEngineering("20170301","20220301",Default_folder_path)
#testpath=zzzz.FE03()
zzzz=FE.CSZLFeatureEngineering(dayC,dayD,Default_folder_path)
testpath=zzzz.FE05()
#zzzz=FE.CSZLFeatureEngineering("20220101","20220408",Default_folder_path)
#testpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20190101","20190301",Default_folder_path)
#trainpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20220101","20220301",Default_folder_path)
#testpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20190101","20210101",Default_folder_path)
#trainpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20220101","20220401",Default_folder_path)
#testpath=zzzz.FE03()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain(trainpath)
#resultpath2=cur_model.LGBmodelpredict(trainpath,cur_model_path)
resultpath=cur_model.LGBmodelpredict(testpath,cur_model_path)
#cur_model_path2=cur_model.LGBmodelretrain(trainpath,resultpath2)
#resultpath3=cur_model.LGBmodelrepredict(testpath,resultpath,cur_model_path2)
resultpath=cur_model.MixOutputresult_groupbalence(testpath,cur_model_path)
today_df = pd.read_csv(resultpath,index_col=0,header=0)
#lastday=today_df['trade_date'].max()
#today_df['ts_code']=today_df['ts_code'].apply(lambda x : x[:-3])
#copy_df=today_df[today_df['trade_date']==lastday]
#copy_df.to_csv("Today_NEXT_predict.csv")
curdisplay=CSZLDisplay.CSZLDisplay()
curdisplay.Topk_nextopen(resultpath)
pass
def BackTesting_static_0501(self):
        # Generate the required dataset
nowTime=datetime.datetime.now()
delta = datetime.timedelta(days=63)
delta_one = datetime.timedelta(days=1)
LastTime=nowTime-delta_one
month_ago = LastTime - delta
month_ago_next=month_ago+delta_one
Day_start=month_ago_next.strftime('%Y%m%d')
Day_end=LastTime.strftime('%Y%m%d')
Day_now=nowTime.strftime('%Y%m%d')
#Default_folder_path='./temp2/'
Default_folder_path='D:/temp2/'
dayA='20150801'#nomal/small
dayB='20220425'
#dayA='20150801'#nomal/small
#dayB='20220425'
dayC=Day_start
dayD=Day_now
#dayD='20220506'
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FE03()
zzzz=FE.CSZLFeatureEngineering(dayC,dayD,Default_folder_path)
testpath=zzzz.FE03()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain(trainpath)
cur_model.LGBmodelpredict(testpath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence(testpath,cur_model_path)
today_df = pd.read_csv(resultpath,index_col=0,header=0)
lastday=today_df['trade_date'].max()
today_df['ts_code']=today_df['ts_code'].apply(lambda x : x[:-3])
copy_df=today_df[today_df['trade_date']==lastday]
copy_df.to_csv("Today_NEXT_predict.csv")
#curdisplay=CSZLDisplay.CSZLDisplay()
#curdisplay.Topk_nextopen(resultpath)
pass
def BackTesting_static_0515(self):
        # Generate the required dataset
nowTime=datetime.datetime.now()
delta = datetime.timedelta(days=63)
delta_one = datetime.timedelta(days=1)
LastTime=nowTime-delta_one
month_ago = LastTime - delta
month_ago_next=month_ago+delta_one
Day_start=month_ago_next.strftime('%Y%m%d')
Day_end=LastTime.strftime('%Y%m%d')
Day_now=nowTime.strftime('%Y%m%d')
#Default_folder_path='./temp2/'
Default_folder_path='D:/temp2/'
dayA='20150801'#nomal/small
dayB='20220425'
#dayA='20150801'#nomal/small
#dayB='20220425'
dayC=Day_start
dayD=Day_now
dayD='20220513'
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FE05()
zzzz=FE.CSZLFeatureEngineering(dayC,dayD,Default_folder_path)
testpath=zzzz.FE05()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain(trainpath)
cur_model.LGBmodelpredict(testpath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence(testpath,cur_model_path)
today_df = pd.read_csv(resultpath,index_col=0,header=0)
lastday=today_df['trade_date'].max()
today_df['ts_code']=today_df['ts_code'].apply(lambda x : x[:-3])
copy_df=today_df[today_df['trade_date']==lastday]
copy_df.to_csv("Today_NEXT_predict.csv")
#curdisplay=CSZLDisplay.CSZLDisplay()
#curdisplay.Topk_nextopen(resultpath)
pass
def BackTesting2(self):
#Default_folder_path='./temp/'
Default_folder_path='D:/temp2/'
#zzzz=CSZLData.CSZLData("20220101","20220301")
#zzzz.getDataSet_all(Default_folder_path)
zzzz=FE.CSZLFeatureEngineering("20130101","20170301",Default_folder_path)
trainpath=zzzz.FE03()
zzzz=FE.CSZLFeatureEngineering("20170301","20220301",Default_folder_path)
testpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20220101","20220408",Default_folder_path)
#testpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20190101","20190301",Default_folder_path)
#trainpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20220101","20220301",Default_folder_path)
#testpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20190101","20200301",Default_folder_path)
#trainpath=zzzz.FE03()
#zzzz=FE.CSZLFeatureEngineering("20210101","20220301",Default_folder_path)
#testpath=zzzz.FE03()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain(trainpath)
resultpath2=cur_model.LGBmodelpredict(trainpath,cur_model_path)
resultpath=cur_model.LGBmodelpredict(testpath,cur_model_path)
cur_model_path2=cur_model.LGBmodelretrain(trainpath,resultpath2)
resultpath3=cur_model.LGBmodelrepredict(testpath,resultpath,cur_model_path2)
#resultpath=cur_model.MixOutputresult(testpath,cur_model_path)
curdisplay=CSZLDisplay.CSZLDisplay()
curdisplay.Topk_nextopen(resultpath3)
pass
def RealTimePredict(self):
Default_folder_path='./temp2/'
#Default_folder_path='D:/temp2/'
#cur_model_path="D:/temp2/FE0320190101to20210101_0/LGBmodeltrainLGBmodel_003"
#cur_model_path="D:/temp2/FE0320150801to20220425_0/LGBmodeltrainLGBmodel_003"
cur_model_path="./temp2/FE0520150801to20220425_0/LGBmodeltrainLGBmodel_003"
        # Whether the model needs to be regenerated (retrained)
if False:
#zzzz=FE.CSZLFeatureEngineering("20190101","20210101",Default_folder_path)
#trainpath=zzzz.FE03()
zzzz=FE.CSZLFeatureEngineering("20150801","20220425",Default_folder_path)
trainpath=zzzz.FE05()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain(trainpath)
        # Generate the required dataset
nowTime=datetime.datetime.now()
delta = datetime.timedelta(days=63)
delta_one = datetime.timedelta(days=1)
LastTime=nowTime-delta_one
month_ago = LastTime - delta
month_ago_next=month_ago+delta_one
Day_start=month_ago_next.strftime('%Y%m%d')
Day_end=LastTime.strftime('%Y%m%d')
Day_now=nowTime.strftime('%Y%m%d')
CSZLData.CSZLDataWithoutDate.get_realtime_quotes(Default_folder_path,Day_start,Day_end)
zzzz=FE.CSZLFeatureEngineering(Day_start,Day_end,Default_folder_path)
#zzzz=FE.CSZLFeatureEngineering("20220301","20220420",Default_folder_path)
#trainpath=zzzz.FE03()
#bbbb=pd.read_pickle(trainpath)
#aaaa=bbbb.head(10)
#aaaa=aaaa.to_csv("tttt.csv")
zzzz.FE05_real(int(Day_now))
featurepath="Today_Joinfeature.csv"
cur_model=CSZLModel.CSZLModel()
#resultpath2=cur_model.LGBmodelpredict(trainpath,cur_model_path)
resultpath=cur_model.LGBmodelpredict(featurepath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence(featurepath,cur_model_path,resultpath)
pass
def RealTimePredict_CB(self):
Default_folder_path='./temp2/'
#Default_folder_path='D:/temp2/'
#cur_model_path="D:/temp2/FE0320190101to20210101_0/LGBmodeltrainLGBmodel_003"
#cur_model_path="D:/temp2/FE0320150801to20220425_0/LGBmodeltrainLGBmodel_003"
cur_model_path="./temp2/FECB0320130101to20220501_0/LGBmodeltrain_CBLGBmodel_003"
        # Whether the model needs to be regenerated (retrained)
if False:
dayA='20130101'#nomal/small
dayB='20220501'
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FECB02()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain_CB(trainpath)
        # Generate the required dataset
nowTime=datetime.datetime.now()
delta = datetime.timedelta(days=63)
delta_one = datetime.timedelta(days=1)
LastTime=nowTime-delta_one
month_ago = LastTime - delta
month_ago_next=month_ago+delta_one
Day_start=month_ago_next.strftime('%Y%m%d')
Day_end=LastTime.strftime('%Y%m%d')
Day_now=nowTime.strftime('%Y%m%d')
CSZLData.CSZLDataWithoutDate.get_realtime_quotes_CB(Default_folder_path,Day_start,Day_end)
zzzz=FE.CSZLFeatureEngineering(Day_start,Day_end,Default_folder_path)
zzzz.FECB03_real(int(Day_now))
featurepath="Today_Joinfeature_CB.csv"
cur_model=CSZLModel.CSZLModel()
#resultpath2=cur_model.LGBmodelpredict(trainpath,cur_model_path)
resultpath=cur_model.LGBmodelpredict_CB(featurepath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence_CB(featurepath,cur_model_path,resultpath)
pass
def CBBackTesting(self):
Default_folder_path='D:/temp2/'
dayA='20130101'#nomal/small
dayB='20220501'
dayC='20220301'
dayD='20220505'
#dayD='20220506'
dayD='20220513'
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FECB03()
zzzz=FE.CSZLFeatureEngineering(dayC,dayD,Default_folder_path)
testpath=zzzz.FECB03()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain_CB(trainpath)
cur_model.LGBmodelpredict_CB(testpath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence_CB(testpath,cur_model_path)
curdisplay=CSZLDisplay.CSZLDisplay()
curdisplay.Topk_nextopen_CB(resultpath)
pass
def CBBackTesting_static_0508(self):
        # Generate the required dataset
nowTime=datetime.datetime.now()
delta = datetime.timedelta(days=63)
delta_one = datetime.timedelta(days=1)
LastTime=nowTime-delta_one
month_ago = LastTime - delta
month_ago_next=month_ago+delta_one
Day_start=month_ago_next.strftime('%Y%m%d')
Day_end=LastTime.strftime('%Y%m%d')
Day_now=nowTime.strftime('%Y%m%d')
#Default_folder_path='./temp2/'
Default_folder_path='D:/temp2/'
dayA='20130101'#nomal/small
dayB='20220301'
dayC=Day_start
dayD=Day_now
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FECB02()
zzzz=FE.CSZLFeatureEngineering(dayC,dayD,Default_folder_path)
testpath=zzzz.FECB02()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain_CB(trainpath)
cur_model.LGBmodelpredict_CB(testpath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence_CB(testpath,cur_model_path)
today_df = pd.read_csv(resultpath,index_col=0,header=0)
lastday=today_df['trade_date'].max()
today_df['ts_code']=today_df['ts_code'].apply(lambda x : x[:-3])
copy_df=today_df[today_df['trade_date']==lastday]
copy_df.to_csv("Today_NEXT_predict_CB.csv")
#curdisplay=CSZLDisplay.CSZLDisplay()
#curdisplay.Topk_nextopen_CB(resultpath)
pass
def CBBackTesting_static_0515(self):
        # Generate the required dataset
nowTime=datetime.datetime.now()
delta = datetime.timedelta(days=63)
delta_one = datetime.timedelta(days=1)
LastTime=nowTime-delta_one
month_ago = LastTime - delta
month_ago_next=month_ago+delta_one
Day_start=month_ago_next.strftime('%Y%m%d')
Day_end=LastTime.strftime('%Y%m%d')
Day_now=nowTime.strftime('%Y%m%d')
#Default_folder_path='./temp2/'
Default_folder_path='D:/temp2/'
dayA='20130101'#nomal/small
dayB='20220501'
dayC=Day_start
dayD=Day_now
dayD='20220513'
zzzz=FE.CSZLFeatureEngineering(dayA,dayB,Default_folder_path)
trainpath=zzzz.FECB03()
zzzz=FE.CSZLFeatureEngineering(dayC,dayD,Default_folder_path)
testpath=zzzz.FECB03()
cur_model=CSZLModel.CSZLModel()
cur_model_path=cur_model.LGBmodeltrain_CB(trainpath)
cur_model.LGBmodelpredict_CB(testpath,cur_model_path)
resultpath=cur_model.MixOutputresult_groupbalence_CB(testpath,cur_model_path)
today_df = pd.read_csv(resultpath,index_col=0,header=0)
lastday=today_df['trade_date'].max()
today_df['ts_code']=today_df['ts_code'].apply(lambda x : x[:-3])
copy_df=today_df[today_df['trade_date']==lastday]
copy_df.to_csv("Today_NEXT_predict_CB.csv")
#curdisplay=CSZLDisplay.CSZLDisplay()
#curdisplay.Topk_nextopen_CB(resultpath)
pass
def Todays_action(self,last_path,Today_result_path,changenum_max,singleamout,Auto=False):
        # Maximum number of holdings to swap per day
        #changenum_max=2
        ##total_amount=2000000
        ## Purchase amount for a single position
        #singleamout=1000
        # Widen pandas display settings
pd.set_option('display.width', 5000)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
if False:
df_stocklist=pd.read_csv(CSZLData.CSZLDataWithoutDate.get_stocklist(),index_col=0,header=0)
else:
df_stocklist=pd.read_csv("./Database/stocklist.csv",index_col=0,header=0)
df_stocklist_merge=df_stocklist[['ts_code','name']]
df_stocklist_merge['ts_code']=df_stocklist_merge['ts_code'].map(lambda x : x[:6])
df_stocklist_merge['ts_code']=df_stocklist_merge['ts_code'].fillna(0).apply(pd.to_numeric)
#print(df_stocklist_merge)
df_last=pd.read_csv(last_path,index_col=0,header=0)
df_hold=df_last[['ts_code','hold']]
#print(df_last)
df=pd.read_csv(Today_result_path,index_col=0,header=0)
        # Drop STAR Market (688xxx), Beijing Stock Exchange, and ST stocks
df=df[df['ts_code']<688000]
df=pd.merge(df, df_stocklist_merge, how='left', on=['ts_code'])
df=df[~df['name'].str.contains('st|ST',na=False)]
df['mix_rank'].fillna(-99.99, inplace=True)
df['num_rank']=df['mix_rank'].rank(pct=False,ascending=False,method='min')
oldnumbers=df_last.shape[0]
df_oldcode_set=df[df['ts_code'].isin(df_last['ts_code'])]
df_oldcode_set=df_oldcode_set.sort_values(by=['num_rank'])
df_oldcode_set=pd.merge(df_oldcode_set, df_hold, how='left', on=['ts_code'])
        # Holdings ranked within the top 1000 are not replaced unless there is no other candidate
        # If there are more candidates than the swap limit, replace only the specified number
        # If there is no candidate at all, replace only the last (worst-ranked) holding
enable_change_df=df_oldcode_set[df_oldcode_set['mix_rank']>-98]
dddd=enable_change_df[enable_change_df['num_rank']>1000]
real_change_df = dddd if dddd.shape[0]<changenum_max else dddd.tail(changenum_max)
if real_change_df.shape[0]==0:
real_change_df=enable_change_df.tail(1)
changenum_real=real_change_df.shape[0]
#print(real_change_df)
        # Keep the holdings that are not selected for replacement
df_holdset=df_oldcode_set[~df_oldcode_set['ts_code'].isin(real_change_df['ts_code'])]
del_show=df_oldcode_set[df_oldcode_set['ts_code'].isin(real_change_df['ts_code'])]
print("===show sell===")
if Auto:
print(del_show)
df_del_sever=del_show[["ts_code","0","19","mix_rank"]]
else:
print(del_show[['ts_code','name','num_rank','hold','close_show']])
print("===show sell end===")
df_choiceset=df[~df['ts_code'].isin(df_last['ts_code'])]
        # Guard added so picks stay affordable within the single-position budget
df_choiceset=df_choiceset[df_choiceset['close_show']<(singleamout/100)]
df_choiceset=df_choiceset.sort_values(by=['num_rank'])
df_choiceset=df_choiceset.head(changenum_real)
df_choiceset['hold']=(singleamout/df_choiceset['close_show'])//100 * 100
print("===show buy===")
if Auto:
print(df_choiceset)
df_add_sever=df_choiceset[["ts_code","0","19","mix_rank"]]
#else:
# print(del_show[['ts_code','name','num_rank','hold','close_show']])
print(df_choiceset[['ts_code','hold','close_show','0','name']])
print("===show buy end===")
df_newset=df_holdset.append(df_choiceset, ignore_index=True)
df_newset=df_newset[['ts_code','trade_date','Shift_1total_mv_rank','0','19','num_rank','close_show','hold','name']]
print(df_newset)
#df_newset['price']=df_newset['close_show']*df_newset['hold']
#print(df_newset['price'])
        # Whether to overwrite the previous day's result
if Auto:
df_server=df_del_sever.append(df_add_sever, ignore_index=True)
print(df_server)
df_server.to_csv("today_real_remix_result.csv")
df_newset.to_csv(last_path,encoding='utf-8-sig')
else:
print("是否覆盖前日结果 y/n")
if_cover=input()
if if_cover=='y' or if_cover=='Y':
df_newset.to_csv(last_path,encoding='utf-8-sig')
else:
df_newset.to_csv("temp_result.csv",encoding='utf-8-sig')
pass
def Todays_action_CB(self,last_path,Today_result_path,changenum_max,singleamout,Auto=False):
        # Maximum number of holdings to swap per day
        #changenum_max=2
        ##total_amount=2000000
        ## Purchase amount for a single position
        #singleamout=1000
        # Widen pandas display settings
pd.set_option('display.width', 5000)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
if False:
df_stocklist=pd.read_csv(CSZLData.CSZLDataWithoutDate.get_stocklist(),index_col=0,header=0)
else:
df_stocklist=pd.read_csv("./Database/cb_basic.csv",index_col=0,header=0)
df_stocklist_merge=df_stocklist[['ts_code','bond_short_name','stk_short_name']]
df_stocklist_merge['ts_code']=df_stocklist_merge['ts_code'].map(lambda x : x[:6])
df_stocklist_merge['ts_code']=df_stocklist_merge['ts_code'].fillna(0).apply(pd.to_numeric)
#print(df_stocklist_merge)
df_last= | pd.read_csv(last_path,index_col=0,header=0) | pandas.read_csv |
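# A distilled sketch of the swap rule in Todays_action above, on a toy frame:
# holdings ranked inside the top 1000 are kept, at most changenum_max of the
# worst-ranked ones are sold, and if nothing exceeds rank 1000 the single
# worst holding is still replaced. The data here is illustrative only.
import pandas as pd
holdings = pd.DataFrame({"ts_code": [1, 2, 3],
                         "num_rank": [120, 1500, 2300],
                         "mix_rank": [5.0, 1.2, 0.3]})
changenum_max = 2
candidates = holdings[(holdings["mix_rank"] > -98) & (holdings["num_rank"] > 1000)]
to_sell = candidates.sort_values("num_rank").tail(changenum_max)
if to_sell.empty:
    to_sell = holdings[holdings["mix_rank"] > -98].sort_values("num_rank").tail(1)
# here ts_code 2 and 3 are sold; replacements come from the top of today's ranking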
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import glob, os
#from calc_UMI_errorRate import *
import numpy as np
import pickle
#%%
"""
Function to accumulate discordance rates from all relevant SAM files.
Requires helper functions imported from calc_UMI_errorRate.py.
Args:
*_meRanGh_genomeMapTrim_dedupGroup.bam_ERCC.sam: SAM file only containing ERCC reads.
Return:
Dataframe containing discordance and concordance rates of each SAM file.
"""
counts_list = {}
coverage_list = {}
name_list = []
for file in glob.glob("**/*_meRanGh_genomeMapTrim_dedupGroup.bam_ERCC.sam", recursive=True):
print(file)
name = os.path.basename(file).split("_meRanGh_genomeMapTrim_dedupGroup.bam_ERCC.sam")[0]
name_list.append(name)
counts_file, coverage_file = countCs_from_SAM(file)
#subset_bam(file)
counts_list[name] = pd.Series(counts_file.values())
coverage_list[name] = pd.Series(coverage_file.values())
#%%
coverage_df = pd.DataFrame(coverage_list)
coverage_melt = coverage_df.melt()
bins = [0,1,4,9,19,1000]
labels = ['1','2-4','5-9','10-19','20+']
coverage_melt['binned'] = pd.cut(coverage_melt['value'], bins, labels=labels)
#%%
coverage_binned = coverage_melt.pivot(values='binned', columns='variable')
coverage_binned = coverage_binned.apply(pd.value_counts, axis=0).T
coverage_binned.index.name = 'sample'
coverage_binned.columns=coverage_binned.columns.astype('str')
coverage_binned = coverage_binned.reset_index()
coverage_binned_melt = pd.melt(coverage_binned, id_vars='sample')
coverage_binned_melt['group'] = coverage_binned_melt['sample'].str.replace(r'_.*$', '')
#%%
#sns.stripplot(data=coverage_df[coverage_df > 0], palette='Paired')
sns.boxplot(data=coverage_df[coverage_df > 1], palette='Paired')
plt.show()
#%%
p = sns.catplot(data=coverage_binned_melt, x='group', y='value', hue='variable', size=4, aspect=2,
edgecolor="black", linewidth=2, palette='bright', kind='bar')
plt.xticks(rotation=90)
plt.yscale('log')
sns.despine()
sns.set_style("ticks")
#plt.savefig("UMI_group_coverage.png", bbox_inches='tight', dpi=400, transparent=True)
plt.show()
#%%
# m5C rate dict
rate_dict = []
counts_dict = []
for k, v in counts_list.items(): # k = sample name
#print(k)
for i, j in v.items(): # i = records on read
#print(j)
for x,y in j.items(): # x = position on read, y = nucleotide coverage at position in group
#print(y)
if ((coverage_list[k][i] >= 1) & (y >=1)): ## IMPORTANT GROUP COVERAGE, nuc count, CUTOFF ##
# (name of sample, coverage at position, count, m5C rate)
rate_dict.append(tuple([k, coverage_list[k][i], int(y), (int(y) / coverage_list[k][i])]))
#%%
rate_df = | pd.DataFrame.from_records(rate_dict, columns=['name', 'coverage', 'count', 'rate']) | pandas.DataFrame.from_records |
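# Small illustration of the coverage binning used above: pd.cut with the same
# edges and labels maps raw UMI-group coverage counts onto the plotted
# categories.
import pandas as pd
cov = pd.Series([1, 3, 7, 15, 42])
pd.cut(cov, bins=[0, 1, 4, 9, 19, 1000], labels=['1', '2-4', '5-9', '10-19', '20+'])
# -> categories: '1', '2-4', '5-9', '10-19', '20+'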
import sys
# insert at 1, 0 is the script path (or '' in REPL)
sys.path.insert(1, '../')
from rankNews.query import NewQueryProcessor
import operator
import pandas as pd
import argparse
from rankNews.parse import MatchParser, PickleParser
from datetime import datetime, timedelta
parser = argparse.ArgumentParser(description='pass day')
parser.add_argument('--day', '-d')
args = parser.parse_args()
# def get_news_5days_before_tweet(day):
# is_date = tweets.created_at == day
# tweets_this_day = tweets[is_date]
# tweet_index = tweets_this_day.id.to_list()
# news_within_range = news[(news.publishdate <= day) & (news.publishdate >= (day - timedelta(days=5)))]
# return tweet_index, news_within_range.index.to_list()
# def get_news_before_tweet(news, date_range):
# news_within_range = news[(news.publishdate < day) & (news.publishdate >= (day - timedelta(days=date_range)))]
# return news_within_range
def get_tweet_and_news_5days(tweets, news, day):
is_date = tweets.created_at == day
tweets_this_day = tweets[is_date]
# tweet_index = tweets_this_day.id.to_list()
news_within_range = news[(news.publishdate <= day) & (news.publishdate >= (day - timedelta(days=5)))]
return tweets_this_day[['id', 'entity']], news_within_range[['id', 'entity']]
def main():
if int(args.day) > 24:
day = datetime(2020, 5, int(args.day)).date()
else:
day = datetime(2020, 6, int(args.day)).date()
# day = datetime(2018, 10, int(args.day)).date()
data_path = "/data1/xiuwen/twitter/"
news = pd.read_pickle(data_path + "tweet2020/news.pkl")
tweets = pd.read_pickle(data_path + "tweet2020/tweets.pkl")
tweets['created_at'] = pd.to_datetime(tweets.created_at, errors='coerce').dt.date
news['publishdate'] = | pd.to_datetime(news.publishdate, errors='coerce') | pandas.to_datetime |
# WeirdData Copyright (c) 2020.
# Author: <NAME>
# Web: https://github.com/WeirdData/GeoAnalysis
#
# Doctors in position at Primary Health Centres in Rural Areas
# Data from:
# https://data.gov.in/resources/stateut-wise-doctors-primary-health-centres-rural-areas-during-2005-and-2019
import json
import pandas as pd
from pprint import pprint
def get_data():
with open("data/doctors.json") as f:
data = json.load(f)
columns = {}
values = []
c = 0
for d in data:
for k in data[d]:
if isinstance(k, dict):
columns[c] = k['label']
c += 1
else:
values.append(k)
df = | pd.DataFrame(data=values) | pandas.DataFrame |
from keras_models_factory.utils4 import hash_array_sum as hash_array
import numpy as np
import pandas as pd
from keras_models_factory.utils4 import number_of_digits_post_decimal
class TestUtils4(object):
def test_hash_array_np(self):
nb_samples = 5
np.random.seed(0)
x1 = np.random.randn(nb_samples)
np.random.seed(0)
x2 = np.random.randn(nb_samples)
assert (x1==x2).all()
h1 = hash_array(x1)
h2 = hash_array(x2)
assert h1==h2
x3 = x2.copy()
h3 = hash_array(x3)
assert h3==h2
def test_hash_array_pd(self):
nb_samples = 5
np.random.seed(0)
S1 = pd.Series(np.random.randn(nb_samples))
S2 = pd.Series(np.random.randn(nb_samples))
x1 = | pd.concat({'main': S1, 'new': S2}, axis=1) | pandas.concat |
import numpy as np
import pandas as pd
import sqlite3
def smmry(df):
display(df.info(), df.memory_usage(deep=True), df.head())
def csnap(df, fn=lambda x: x.shape, msg=None):
""" Custom Help function to print things in method chaining.
Returns back the df to further use in chaining.
"""
if msg:
print(msg)
display(fn(df))
return df
def setcols(df, fn=lambda x: x.columns.map('_'.join), cols=None):
"""Sets the column of the data frame to the passed column list.
"""
if cols:
df.columns = cols
else:
df.columns = fn(df)
return df
def cfilter(df, fn, axis="rows"):
""" Custom Filters based on a condition and returns the df.
function - a lambda function that returns a binary vector
thats similar in shape to the dataframe
axis = rows or columns to be filtered.
A single level indexing
"""
if axis == "rows":
return df[fn(df)]
elif axis == "columns":
return df.iloc[:, fn(df)]
def fndf(df, fn, cols):
df[cols] = fn(df[cols])
return df
def tonumeric(df, columns):
df[columns] = df[columns].apply(pd.to_numeric)
return df
def tocategorical(df, columns):
df[columns] = df[columns].astype('category')
return df
def todate(df, columns, frmt=None):
df[columns] = df[columns].astype(str)
df[columns] = | pd.to_datetime(df[columns], format=frmt) | pandas.to_datetime |
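# Usage sketch of the chaining helpers above: each function returns the frame,
# so they compose with DataFrame.pipe. Note csnap/smmry rely on IPython's
# display(), so this is a notebook-style example; the data is illustrative.
import pandas as pd
raw = pd.DataFrame({"Value": ["1", "2", "3"], "Group": ["a", "b", "a"]})
out = (
    raw
    .pipe(csnap, msg="raw shape")                              # prints (3, 2), returns df
    .pipe(tonumeric, columns=["Value"])
    .pipe(tocategorical, columns=["Group"])
    .pipe(cfilter, fn=lambda d: d["Value"] > 1, axis="rows")   # keep rows with Value > 1
)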
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 15 19:39:37 2021
@author: <NAME>
"""
import argparse
import yfinance as yf
import pandas as pd
from clearml import Task
task = Task.init(project_name='first ClearML steps', task_name='finance')
#%%
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--symbol', help='symbol used for regression', default='AAPL')
parser.add_argument('--plot', help='bool to control if plotly should open a browser', default=True)
args = parser.parse_known_args()
parameters = {
'LinearRegression': True,
'Ridge': False,
'SVR': False,
}
parameters = task.connect_configuration(configuration=parameters,
name='regressor selection',
description='set which regressor to run')
tickerData = yf.Ticker(args[0].symbol)
tickerDf = tickerData.history(period='max', interval='1d')[['Open', 'High', 'Low', 'Close', 'Volume']]
process(param=parameters, df=tickerDf, symbol=args[0].symbol, attrib='Close', plot=args[0].plot)
return
def plot_(df, show=False):
import plotly.express as px
import plotly.io as pio
pio.renderers.default='browser'
#pio.renderers.default='png'
fig = px.line(df, title=getattr(df, 'ticker'))
if show:
if task.running_locally():
fig.show()
return fig
def process(param, df, symbol, attrib='Close', shift=1, plot=False):
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error
model = None
df_valid = df[-30:]
df_process = df[:-30]
df_lag1d = df_process - df_process.shift(shift)
df_change = df_lag1d / df_process * 100
df_change.columns += '_pcent'
df_change = df_change[1:] # remove first row containing NaN
df_process = df_process[1:]
y = df_change[attrib+'_pcent']
X = df_change.drop(y.name, axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=False)
assert (0 == X_train.isna().sum().sum())
assert (0 == y.isna().sum())
df_plot = | pd.concat([df_process[attrib], df_change[attrib+'_pcent']], axis=1) | pandas.concat |
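# A note on the change computation in process() above: df_lag1d / df_process
# divides the one-step difference by the *current* value, which differs
# slightly from the conventional percent change (difference over the
# *previous* value). A tiny illustration:
import pandas as pd
s = pd.Series([100.0, 110.0])
((s - s.shift(1)) / s * 100).iloc[1]   # 9.0909...  (the convention used in process())
(s.pct_change() * 100).iloc[1]         # 10.0       (conventional percent change)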
# eBird Basic Dataset. Version: EBD_relDec-2020.
# Cornell Lab of Ornithology, Ithaca, New York. Dec 2020.
# The date in the raw data dump is most similar to the data returned from the eBird API
# get_details. Our main purpose here is to find any missing subIds. We could morph the
# data in the bulk dump, but for the small number of missing records, it is easier to
# just add them to visits
from typing import List, Optional
import pandas as pd
from shapely.geometry import Point
from common_paths import raw_data_path
from datetime_manipulation import normalize_time_for_visits
def load_bulk_data() -> Optional[pd.DataFrame]:
bulk_data = None
# This is really specific, so hardwire paths for now
bulk_data_dir = raw_data_path / 'ebd_US-CA_202012_202101_prv_relDec-2020'
bulk_data_path = bulk_data_dir / 'ebd_US-CA_202012_202101_prv_relDec-2020.txt'
if not bulk_data_path.exists():
return None
bulk_data = pd.read_csv(bulk_data_path, dtype=str, header=0, sep='\t', low_memory=False).fillna(
'')
provisional_data_path = bulk_data_dir / 'ebd_US-CA_202012_202101_prv_relDec-2020_provisional.txt'
if provisional_data_path.exists():
prov_data = pd.read_csv(provisional_data_path, dtype=str, header=0, sep='\t',
low_memory=False).fillna('')
bulk_data = pd.concat([bulk_data, prov_data], axis=0, ignore_index=True)
return bulk_data
def find_missing_subids(visits: pd.DataFrame, bulk_data: Optional[pd.DataFrame],
xdates: List[str], region_codes: List[str]):
if bulk_data is None:
return []
mask = (bulk_data['OBSERVATION DATE'].isin(xdates)) & (
bulk_data['COUNTY CODE'].isin(region_codes))
bulk_subids = set(bulk_data[mask]['SAMPLING EVENT IDENTIFIER'].values)
base_subids = set(visits.subId.values)
return sorted(list(bulk_subids - set(base_subids)))
def use_basic_dataset(visits: pd.DataFrame, xdates: List[str],
region_codes: List[str]) -> pd.DataFrame:
# Consult Basic Dataset (EBD) bulk data from eBird to find missing subIds
# Append records to visits if any are found
# Takes about 13s to load BDS for Dec 2020
bulk_data = load_bulk_data()
if bulk_data is None:
return visits
missing_subids = find_missing_subids(visits, bulk_data, xdates, region_codes)
bds = bulk_data[bulk_data['SAMPLING EVENT IDENTIFIER'].isin(missing_subids)].copy().reset_index(
drop=True)
if bds.empty:
return visits
is_hotspot = bds['LOCALITY TYPE'].apply(lambda lt: lt=='H')
# Names match those in visits
new_col_names = {
'LOCALITY ID': 'locId', 'SAMPLING EVENT IDENTIFIER': 'subId', 'OBSERVER ID': 'Name',
'OBSERVATION DATE': 'obsDt', 'TIME OBSERVATIONS STARTED': 'obsTime',
'LOCALITY': 'loc_name', 'LATITUDE': 'latitude', 'LONGITUDE': 'longitude',
'COUNTY CODE': 'RegionCode'
}
bds.rename(columns=new_col_names, inplace=True)
numSpecies_df = bds.groupby(['subId']).size().reset_index(name='numSpecies').sort_values(
by=['subId'])
bds['loc_isHotspot'] = is_hotspot
bds = bds.drop_duplicates(['subId', 'obsDt', 'obsTime', 'latitude', 'longitude']).reset_index(
drop=True)
bds['numSpecies'] = numSpecies_df.numSpecies.values
bds.obsTime = bds.obsTime.apply(normalize_time_for_visits)
new_col_order = ['locId', 'subId', 'Name', 'numSpecies', 'obsDt', 'obsTime', 'loc_name',
'latitude', 'longitude', 'loc_isHotspot', 'RegionCode']
bds = bds[new_col_order].sort_values(by=['subId']).reset_index(drop=True)
for col in ['latitude', 'longitude']:
bds[col] = bds[col].apply(pd.to_numeric).fillna(0).astype(float)
vgeometry = [Point(x, y) for x, y in zip(bds.longitude, bds.latitude)] # Longitude first
bds['geometry'] = vgeometry
# We could fix 'Name' with 'userDisplayName' field from get_details, but not important here
# Need to fill in ['loc_isHotspot', 'RegionCode']
return | pd.concat([visits, bds], axis=0, ignore_index=True) | pandas.concat |
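# Hypothetical call sketch for use_basic_dataset above: xdates follow the
# 'YYYY-MM-DD' format of the EBD 'OBSERVATION DATE' column, and region_codes
# are eBird county codes; the exact values and the empty visits frame are
# illustrative only.
import pandas as pd
visits = pd.DataFrame(columns=['locId', 'subId', 'Name', 'numSpecies', 'obsDt', 'obsTime',
                               'loc_name', 'latitude', 'longitude', 'loc_isHotspot', 'RegionCode'])
visits = use_basic_dataset(visits, xdates=['2020-12-19'], region_codes=['US-CA-085'])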
import fsspec
import os
import uuid
import pandas as pd
import pytest
from pins.tests.helpers import DEFAULT_CREATION_DATE, rm_env
from pins.config import PINS_ENV_INSECURE_READ
from pins.errors import PinsError, PinsInsecureReadError
from pins.meta import MetaRaw
from datetime import datetime, timedelta
from time import sleep
from pathlib import Path
# using pytest cases, so that we can pass in fixtures as parameters
# TODO: this seems like maybe overkill
from pytest_cases import fixture, parametrize
@fixture
def df():
import pandas as pd
return pd.DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]})
@fixture
def board(backend):
yield backend.create_tmp_board()
backend.teardown()
# misc ========================================================================
def test_board_validate_pin_name_root(board):
with pytest.raises(ValueError) as exc_info:
board.path_to_pin("/some_pin")
assert "Invalid pin name" in exc_info.value.args[0]
# pin_write ===================================================================
def test_board_pin_write_default_title(board):
df = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
meta = board.pin_write(df, "df_csv", title=None, type="csv")
assert meta.title == "df_csv: a pinned 3 x 2 DataFrame"
def test_board_pin_write_prepare_pin(board, tmp_dir2):
df = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
meta = board.prepare_pin_version(
str(tmp_dir2), df, "df_csv", title=None, type="csv"
)
assert meta.file == "df_csv.csv"
assert (tmp_dir2 / "data.txt").exists()
assert (tmp_dir2 / "df_csv.csv").exists()
assert not (tmp_dir2 / "df_csv.csv").is_dir()
def test_board_pin_write_roundtrip(board):
df = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
assert not board.pin_exists("df_csv")
board.pin_write(df, "df_csv", type="csv")
assert board.pin_exists("df_csv")
loaded_df = board.pin_read("df_csv")
assert loaded_df.equals(df)
def test_board_pin_write_type_not_specified_error(board):
class C:
pass
with pytest.raises(NotImplementedError):
board.pin_write(C(), "cool_pin")
def test_board_pin_write_type_error(board):
class C:
pass
with pytest.raises(NotImplementedError) as exc_info:
board.pin_write(C(), "cool_pin", type="MY_TYPE")
assert "MY_TYPE" in exc_info.value.args[0]
def test_board_pin_write_rsc_index_html(board, tmp_dir2, snapshot):
if board.fs.protocol != "rsc":
pytest.skip()
df = | pd.DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]}) | pandas.DataFrame |
import datetime
import streamlit as st
import pandas as pd
import plotly.express as px
import numpy as np
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import requests as rs
import json
import random
import webbrowser
import yfinance as yf
from sys_var import api_list,indicator_symbol_list,graph_type_list
class MyError(Exception) :
# Constructor or Initializer
def __init__(self, value) :
self.value = value
# __str__ is to print() the value
def __str__(self) :
return (repr(self.value))
st.set_page_config(layout='wide')
st.sidebar.title('Financial Analysis Dashboard')
radio_select = st.sidebar.radio('Select from below options', [ 'Indian Stocks','Crypto', 'US Stocks', 'Forex',
"Global stocks and more(Alpha Vantage)",
"Global stocks and more(Yahoo Finance)"])
if radio_select == 'Crypto' :
st.title("CRYPTOCURRENCIES")
col1, col2 = st.columns(2)
with col1 :
digital_data = pd.read_csv("digital_currency_list.csv")
dictio = digital_data.set_index('currency name').T.to_dict('list')
digital_list = digital_data['currency name'].dropna().unique().tolist()
crypto_select1 = st.selectbox("Select a Cryptocurrency", digital_list)
input_value = dictio[crypto_select1][0]
with col2 :
currency_data = pd.read_csv("physical_currency_list.csv")
dictio2 = currency_data.set_index('currency name').T.to_dict('list')
currency_list = currency_data['currency name'].dropna().unique().tolist()
currency_select = st.selectbox("Select Currency Pair", currency_list)
currency_select = dictio2[currency_select][0]
with st.expander('Show Options'):
col3, col4 = st.columns(2)
col5, col6 = st.columns(2)
with col3 :
interval_list = ["1 Minute", "5 Minutes", "15 Minutes", "30 Minutes", "60 Minutes", "1 Day", "1 Week",
"1 Month"]
interval_list1 = ["1 Minute", "5 Minutes", "15 Minutes", "30 Minutes", "60 Minutes"]
interval_list2 = ["1 Day", "1 Week", "1 Month"]
interval_list1_dict = {"1 Minute" : "1min", "5 Minutes" : "5min", "15 Minutes" : "15min",
"30 Minutes" : "30min",
"60 Minutes" : "60min"}
interval_list2_dict = {"1 Day" : "DAILY", "1 Week" : "WEEKLY", "1 Month" : "MONTHLY"}
interval_list21_dict = {"1 Day" : "Daily", "1 Week" : "Weekly", "1 Month" : "Monthly"}
indicator_dict = {"1 Minute" : "1min", "5 Minutes" : "5min", "15 Minutes" : "15min", "30 Minutes" : "30min",
"60 Minutes" : "60min", "1 Day" : "daily", "1 Week" : "weekly", "1 Month" : "monthly"}
interval_select = st.selectbox("Select Interval", interval_list)
with col4 :
graph_type = st.selectbox('Select Graph type', graph_type_list)
flag = 0
if interval_select in interval_list1 :
flag = 1
try :
y_arr = ['Rate']
data = None
if flag == 1 :
data = rs.get("https://www.alphavantage.co/query?function=CRYPTO_INTRADAY&symbol=" + str(
input_value) + "&market=" + str(currency_select) + "&interval=" + interval_list1_dict[
interval_select] + "&apikey=" + random.choice(api_list))
print("jello")
data = data.json()
data = json.dumps(data["Time Series Crypto (" + str(interval_list1_dict[interval_select]) + ")"])
data = pd.read_json(data)
data = data.T.reset_index()
data.rename(columns={'1. open' : 'Open'}, inplace=True)
data.rename(columns={'2. high' : 'High'}, inplace=True)
data.rename(columns={'3. low' : 'Low'}, inplace=True)
data.rename(columns={'4. close' : 'Rate'}, inplace=True)
st.markdown(
"<h1 style='text-align: center; color: red;'>Chart of " + crypto_select1 + " <sub style='font-size: 25px;'>" + input_value + "/" + currency_select + "</sub></h1>",
unsafe_allow_html=True)
if graph_type == 'Line' :
# fig = px.line(data, x="index", y=y_arr, template="ggplot2", labels={"index" : "Date"})
fig = make_subplots(specs=[[{"secondary_y" : True}]])
fig.add_trace(go.Scatter(x=data['index'], y=data['Rate'], name='Rate'),
secondary_y=True)
fig.add_trace(go.Bar(x=data['index'], y=data['5. volume'], name='Volume', opacity=0.5),
secondary_y=False)
fig.update_layout(autosize=False, width=1600, height=800, legend_title="Indicators", font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
fig.layout.yaxis2.showgrid = False
st.plotly_chart(fig)
if graph_type == 'Candlesticks' or graph_type == 'OHLC' :
data.rename(columns={'Rate' : 'Close'}, inplace=True)
fig = make_subplots(specs=[[{"secondary_y" : True}]])
# include candlestick with rangeselector
if graph_type == 'Candlesticks':
fig.add_trace(go.Candlestick(x=data['index'],
open=data['Open'], high=data['High'],
low=data['Low'], close=data['Close'], name='Rate'),
secondary_y=True)
if graph_type == 'OHLC':
fig.add_trace(go.Ohlc(x=data['index'],
open=data['Open'], high=data['High'],
low=data['Low'], close=data['Close'], name='Rate'),
secondary_y=True)
# include a go.Bar trace for volumes
fig.add_trace(go.Bar(x=data['index'], y=data['5. volume'], name='Volume', opacity=0.5),
secondary_y=False)
fig.update_layout(autosize=False, width=1600, height=800, legend_title="Indicators", font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
fig.layout.yaxis2.showgrid = False
st.plotly_chart(fig)
if graph_type == 'Filled Area' :
fig = px.area(data, x='index', y='Rate', template="ggplot2", labels={"index" : "Date"})
fig.update_layout(autosize=False, width=1600, height=800, legend_title="Indicators", font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
st.plotly_chart(fig)
if flag == 0 :
data = rs.get("https://www.alphavantage.co/query?function=DIGITAL_CURRENCY_" + interval_list2_dict[
interval_select] + "&symbol=" + str(
input_value) + "&market=" + str(currency_select) + "&apikey=" + random.choice(api_list))
data = data.json()
data = json.dumps(data["Time Series (Digital Currency " + str(interval_list21_dict[interval_select]) + ")"])
data = pd.read_json(data)
data = data.T.reset_index()
data.rename(columns={'4a. close (' + str(currency_select) + ')' : 'Rate'}, inplace=True)
data.rename(columns={'1a. open (' + str(currency_select) + ')' : 'Open'}, inplace=True)
data.rename(columns={'2a. high (' + str(currency_select) + ')' : 'High'}, inplace=True)
data.rename(columns={'3a. low (' + str(currency_select) + ')' : 'Low'}, inplace=True)
if graph_type != 'Filled Area' :
with col5 :
indicate_select = st.multiselect('Add Indicators', indicator_symbol_list)
interval_sel = indicate_select
with col6 :
time_select = st.number_input('Select indicator time period', max_value=30, min_value=5, step=1)
for i in range(len(interval_sel)) :
data2 = rs.get("https://www.alphavantage.co/query?function=" + interval_sel[i] + "&symbol=" + str(
input_value) + str(currency_select) + "&interval=" + indicator_dict[
interval_select] + "&time_period=" + str(
time_select) + "&series_type=open&apikey=" + random.choice(api_list))
data2 = data2.json()
data2 = json.dumps(data2["Technical Analysis: " + interval_sel[i]])
data2 = pd.read_json(data2)
data2 = data2.T.reset_index()
data = pd.merge(data, data2, on="index", how="left")
y_arr = y_arr + interval_sel
st.markdown(
"<h1 style='text-align: center; color: red;'>Chart of " + crypto_select1 + " <sub style='font-size: 25px;'>" + input_value + "/" + currency_select + "</sub></h1>",
unsafe_allow_html=True)
# fig = px.line(data, x="index", y=y_arr, template="ggplot2", labels={"index" : "Date"})
if graph_type == 'Line' :
fig = make_subplots(specs=[[{"secondary_y" : True}]])
fig.add_trace(go.Scatter(x=data['index'], y=data['Rate'], name='Rate'),
secondary_y=True)
for i in range(len(interval_sel)) :
fig.add_trace(go.Scatter(x=data['index'], y=data[interval_sel[i]], name=interval_sel[i]),
secondary_y=True)
fig.add_trace(go.Bar(x=data['index'], y=data['5. volume'], name='Volume', opacity=0.5),
secondary_y=False)
fig.update_layout(autosize=False, width=1600, height=800, legend_title="Indicators", font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
fig.layout.yaxis2.showgrid = False
st.plotly_chart(fig)
if graph_type == 'Candlesticks' or graph_type == 'OHLC' :
data.rename(columns={'Rate' : 'Close'}, inplace=True)
fig = make_subplots(specs=[[{"secondary_y" : True}]])
# include candlestick with rangeselector
if graph_type == 'Candlesticks' :
fig.add_trace(go.Candlestick(x=data['index'],
open=data['Open'], high=data['High'],
low=data['Low'], close=data['Close'], name='Rate'),
secondary_y=True)
if graph_type == 'OHLC' :
fig.add_trace(go.Ohlc(x=data['index'],
open=data['Open'], high=data['High'],
low=data['Low'], close=data['Close'], name='Rate'),
secondary_y=True)
# include a go.Bar trace for volumes
fig.add_trace(go.Bar(x=data['index'], y=data['5. volume'], name='Volume', opacity=0.5),
secondary_y=False)
fig.update_layout(autosize=False, width=1600, height=800, legend_title="Indicators", font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
fig.layout.yaxis2.showgrid = False
st.plotly_chart(fig)
if graph_type == 'Filled Area' :
fig = px.area(data, x='index', y='Rate', template="ggplot2", labels={"index" : "Date"})
fig.update_layout(autosize=False, width=1600, height=500, legend_title="Indicators", font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
st.plotly_chart(fig)
except Exception as e :
st.info(
"The selected cryptocurrency data is currently unavailable please check your connection or choose any other cryptocurrency(like Bitcoin)")
if radio_select == 'Forex' :
st.title("FOREX")
size_select = st.sidebar.radio('Select output size', ['compact', 'full(uses more data)'])
size_select = size_select.split('(')[0]
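# Alpha Vantage expects outputsize to be exactly "compact" or "full", so the descriptive
# "(uses more data)" suffix from the radio label is stripped off here.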
col1, col2 = st.columns(2)
with col1 :
digital_data = pd.read_csv("physical_currency_list1.csv")
dictio = digital_data.set_index('currency name').T.to_dict('list')
digital_list = digital_data['currency name'].dropna().unique().tolist()
crypto_select1 = st.selectbox("Select the Currency", digital_list)
input_value = dictio[crypto_select1][0]
with col2 :
currency_data = pd.read_csv("physical_currency_list.csv")
dictio2 = currency_data.set_index('currency name').T.to_dict('list')
currency_list = currency_data['currency name'].dropna().unique().tolist()
currency_select = st.selectbox("Select currency pair", currency_list)
currency_select = dictio2[currency_select][0]
with st.expander('Show Options') :
col3, col4 = st.columns(2)
col5, col6 = st.columns(2)
with col3 :
interval_list = ["1 Day", "1 Week", "1 Month"]
interval_list2_dict = {"1 Day" : "DAILY", "1 Week" : "WEEKLY", "1 Month" : "MONTHLY"}
interval_list21_dict = {"1 Day" : "Daily", "1 Week" : "Weekly", "1 Month" : "Monthly"}
indicator_dict = {"1 Minute" : "1min", "5 Minutes" : "5min", "15 Minutes" : "15min", "30 Minutes" : "30min",
"60 Minutes" : "60min", "1 Day" : "daily", "1 Week" : "weekly", "1 Month" : "monthly"}
interval_select = st.selectbox("Select Interval", interval_list)
with col4 :
graph_type = st.selectbox('Select Graph type', graph_type_list)
flag = 0
try :
y_arr = ['Rate']
data = None
if flag == 0 :
print("https://www.alphavantage.co/query?function=FX_" + interval_list2_dict[
interval_select] + "&from_symbol=" + str(
input_value) + "&to_symbol=" + str(currency_select) + "&apikey=" + random.choice(api_list))
data = rs.get("https://www.alphavantage.co/query?function=FX_" + interval_list2_dict[
interval_select] + "&from_symbol=" + str(
input_value) + "&to_symbol=" + str(
currency_select) + "&outputsize=" + size_select + "&apikey=" + random.choice(api_list))
data = data.json()
print(data)
data = json.dumps(data["Time Series FX (" + str(interval_list21_dict[interval_select]) + ")"])
data = pd.read_json(data)
data = data.T.reset_index()
data.rename(columns={'4. close' : 'Rate'}, inplace=True)
data.rename(columns={'1. open' : 'Open'}, inplace=True)
data.rename(columns={'2. high' : 'High'}, inplace=True)
data.rename(columns={'3. low' : 'Low'}, inplace=True)
if graph_type != 'Filled Area' :
with col5 :
indicate_select = st.multiselect('Add Indicators', indicator_symbol_list)
interval_sel = indicate_select
with col6 :
time_select = st.number_input('Select indicator time period', max_value=30, min_value=5, step=1)
for i in range(len(interval_sel)) :
data2 = rs.get("https://www.alphavantage.co/query?function=" + interval_sel[i] + "&symbol=" + str(
input_value) + str(currency_select) + "&interval=" + indicator_dict[
interval_select] + "&time_period=" + str(
time_select) + "&series_type=open&outputsize=" + size_select + "&apikey=" + random.choice(
api_list))
data2 = data2.json()
data2 = json.dumps(data2["Technical Analysis: " + interval_sel[i]])
data2 = pd.read_json(data2)
data2 = data2.T.reset_index()
data = pd.merge(data, data2, on="index", how="left")
y_arr = y_arr + interval_sel
st.markdown(
"<h1 style='text-align: center; color: red;'>Chart of " + crypto_select1 + " <sub style='font-size: 25px;'>" + input_value + "/" + currency_select + "</sub></h1>",
unsafe_allow_html=True)
# fig = px.line(data, x="index", y=y_arr, template="ggplot2", labels={"index" : "Date"})
if graph_type == 'Line' :
fig = make_subplots(specs=[[{"secondary_y" : True}]])
fig.add_trace(go.Scatter(x=data['index'], y=data['Rate'], name='Rate'),
secondary_y=True)
for i in range(len(interval_sel)) :
fig.add_trace(go.Scatter(x=data['index'], y=data[interval_sel[i]], name=interval_sel[i]),
secondary_y=True)
fig.update_layout(autosize=False, width=1600, height=800, legend_title="Indicators", font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
fig.layout.yaxis2.showgrid = False
st.plotly_chart(fig)
if graph_type == 'Candlesticks' or graph_type == 'OHLC' :
data.rename(columns={'Rate' : 'Close'}, inplace=True)
fig = make_subplots(specs=[[{"secondary_y" : True}]])
# include candlestick with rangeselector
if graph_type == 'Candlesticks' :
fig.add_trace(go.Candlestick(x=data['index'],
open=data['Open'], high=data['High'],
low=data['Low'], close=data['Close'], name='Rate'),
secondary_y=True)
if graph_type == 'OHLC' :
fig.add_trace(go.Ohlc(x=data['index'],
open=data['Open'], high=data['High'],
low=data['Low'], close=data['Close'], name='Rate'),
secondary_y=True)
for i in range(len(interval_sel)) :
fig.add_trace(go.Scatter(x=data['index'], y=data[interval_sel[i]], name=interval_sel[i]),
secondary_y=True)
# include a go.Bar trace for volumes
fig.update_layout(autosize=False, width=1600, height=800, legend_title="Indicators", font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
fig.layout.yaxis2.showgrid = False
st.plotly_chart(fig)
if graph_type == 'Filled Area' :
fig = px.area(data, x='index', y='Rate', template="ggplot2", labels={"index" : "Date"})
fig.update_layout(autosize=False, width=1600, height=500, legend_title="Indicators", font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
st.plotly_chart(fig)
except Exception as e :
st.info(
"The selected forex pair data is currently unavailable please check your connection or choose any other pair")
if radio_select == "Global stocks and more(Alpha Vantage)" :
st.title(radio_select)
size_select = st.sidebar.radio('Select output size', ['compact', 'full(uses more data)'])
size_select = size_select.split('(')[0]
keyword = st.text_input("Search by symbol,name or keyword")
if keyword != '' :
print(keyword)
print('https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords=' + str(
keyword) + '&apikey=' + random.choice(api_list))
data = rs.get('https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords=' + str(
keyword) + '&apikey=' + random.choice(api_list))
data = data.json()
# data = pd.read_json(data)
try :
if data["bestMatches"] == [] :
raise (MyError('No financial entity with this name found in our system'))
data = json.dumps(data["bestMatches"])
data = pd.read_json(data)
data.rename(columns={'1. symbol' : 'Symbol'}, inplace=True)
data.rename(columns={'2. name' : 'Name'}, inplace=True)
data.rename(columns={'3. type' : 'Type'}, inplace=True)
data.rename(columns={'4. region' : 'Region'}, inplace=True)
data.rename(columns={'5. marketOpen' : 'Market Open'}, inplace=True)
data.rename(columns={'6. marketClose' : 'Market Close'}, inplace=True)
data.rename(columns={'7. timezone' : 'Timezone'}, inplace=True)
data.rename(columns={'8. currency' : 'Currency'}, inplace=True)
data_ticker = data['Symbol'].tolist()
data_name = data['Name'].tolist()
data_type = data['Type'].tolist()
data_region = data['Region'].tolist()
new_list = []
for i in range(len(data_ticker)) :
s = data_name[i] + "----" + data_ticker[i] + "----" + data_type[i] + "----" + data_region[i]
new_list.append(s)
new_list.insert(0, '--Select from options--')
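# Each option string is assembled as "Name----Symbol----Type----Region" so the chosen
# symbol can later be recovered simply by splitting on "----".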
col1, col2 = st.columns(2)
with col1 :
new_box = st.selectbox("Select from below options", new_list)
if (new_box != '--Select from options--') :
input_value = new_box.split("----")[1]
crypto_select1 = new_box.split("----")[0]
currency_select = data[data['Symbol'] == input_value]['Currency'].tolist()
currency_select1 = currency_select[0]
print(currency_select)
currency_data = pd.read_csv("physical_currency_list.csv")
currency_select = currency_data[currency_data['currency code'] == currency_select[0]]['currency name']
print(currency_select)
with col2 :
st.selectbox("Select Currency pair", currency_select, disabled=True)
st.table(data[data['Symbol'] == input_value].drop(['9. matchScore'], axis=1))
with st.expander('Show Options'):
col3, col4 = st.columns(2)
col5, col6 = st.columns(2)
with col3 :
interval_list = ["1 Day", "1 Week", "1 Month"]
interval_list2_dict = {"1 Day" : "DAILY", "1 Week" : "WEEKLY", "1 Month" : "MONTHLY"}
interval_list21_dict = {"1 Day" : "Daily", "1 Week" : "Weekly", "1 Month" : "Monthly"}
indicator_dict = {"1 Minute" : "1min", "5 Minutes" : "5min", "15 Minutes" : "15min",
"30 Minutes" : "30min",
"60 Minutes" : "60min", "1 Day" : "daily", "1 Week" : "weekly",
"1 Month" : "monthly"}
interval_select = st.selectbox("Select Interval", interval_list)
with col4 :
graph_type = st.selectbox('Select Graph type', graph_type_list)
flag = 0
try :
y_arr = ['Rate']
data = None
if flag == 0 :
data = rs.get("https://www.alphavantage.co/query?function=TIME_SERIES_" + interval_list2_dict[
interval_select] + "&symbol=" + str(
input_value) + "&outputsize=" + size_select + "&apikey=" + random.choice(api_list))
# data=rs.get('https://www.alphavantage.co/query?function=DAILY&symbol=RELIANCE.BSE&outputsize=full&apikey=demo')
data = data.json()
data = json.dumps(data["Time Series (" + str(interval_list21_dict[interval_select]) + ")"])
data = pd.read_json(data)
data = data.T.reset_index()
data.rename(columns={'4. close' : 'Rate'}, inplace=True)
data.rename(columns={'1. open' : 'Open'}, inplace=True)
data.rename(columns={'2. high' : 'High'}, inplace=True)
data.rename(columns={'3. low' : 'Low'}, inplace=True)
if graph_type != 'Filled Area' :
with col5 :
indicate_select = st.multiselect('Add Indicators', indicator_symbol_list)
interval_sel = indicate_select
with col6 :
time_select = st.number_input('Select indicator time period', max_value=30, min_value=5,
step=1)
for i in range(len(interval_sel)) :
data2 = rs.get(
"https://www.alphavantage.co/query?function=" + interval_sel[i] + "&symbol=" + str(
input_value) + "&interval=" + indicator_dict[
interval_select] + "&time_period=" + str(
time_select) + "&series_type=open&outputsize=" + size_select + "&apikey=" + random.choice(
api_list))
data2 = data2.json()
data2 = json.dumps(data2["Technical Analysis: " + interval_sel[i]])
data2 = pd.read_json(data2)
data2 = data2.T.reset_index()
data = pd.merge(data, data2, on="index", how="left")
y_arr = y_arr + interval_sel
st.markdown(
"<h1 style='text-align: center; color: red;'>Chart of " + crypto_select1 + " <sub style='font-size: 25px;'>" + input_value + "/" + currency_select1 + "</sub></h1>",
unsafe_allow_html=True)
# fig = px.line(data, x="index", y=y_arr, template="ggplot2", labels={"index" : "Date"})
if graph_type == 'Line' :
fig = make_subplots(specs=[[{"secondary_y" : True}]])
fig.add_trace(go.Scatter(x=data['index'], y=data['Rate'], name='Rate'),
secondary_y=True)
for i in range(len(interval_sel)) :
fig.add_trace(
go.Scatter(x=data['index'], y=data[interval_sel[i]], name=interval_sel[i]),
secondary_y=True)
fig.add_trace(go.Bar(x=data['index'], y=data['5. volume'], name='Volume', opacity=0.5),
secondary_y=False)
fig.update_layout(autosize=False, width=1600, height=800, legend_title="Indicators",
font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
fig.layout.yaxis2.showgrid = False
st.plotly_chart(fig)
if graph_type == 'Candlesticks' or graph_type == 'OHLC' :
data.rename(columns={'Rate' : 'Close'}, inplace=True)
fig = make_subplots(specs=[[{"secondary_y" : True}]])
# include candlestick with rangeselector
if graph_type == 'Candlesticks' :
fig.add_trace(go.Candlestick(x=data['index'],
open=data['Open'], high=data['High'],
low=data['Low'], close=data['Close'], name='Rate'),
secondary_y=True)
if graph_type == 'OHLC' :
fig.add_trace(go.Ohlc(x=data['index'],
open=data['Open'], high=data['High'],
low=data['Low'], close=data['Close'], name='Rate'),
secondary_y=True)
for i in range(len(interval_sel)) :
fig.add_trace(
go.Scatter(x=data['index'], y=data[interval_sel[i]], name=interval_sel[i]),
secondary_y=True)
# include a go.Bar trace for volumes
fig.add_trace(go.Bar(x=data['index'], y=data['5. volume'], name='Volume', opacity=0.5),
secondary_y=False)
fig.update_layout(autosize=False, width=1600, height=800, legend_title="Indicators",
font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
fig.layout.yaxis2.showgrid = False
st.plotly_chart(fig)
if graph_type == 'Filled Area' :
fig = px.area(data, x='index', y='Rate', template="ggplot2", labels={"index" : "Date"})
fig.update_layout(autosize=False, width=1600, height=500, legend_title="Indicators",
font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
st.plotly_chart(fig)
except Exception as e :
st.info(
"The selected financial entity data is currently unavailable please check your connection or choose another name")
except MyError as err :
st.info(err.value)
if radio_select == "US Stocks" :
st.title(radio_select)
keyword = st.text_input("Search by symbol,name or keyword")
size_select = st.sidebar.radio('Select output size', ['compact', 'full(uses more data)'])
size_select = size_select.split('(')[0]
if keyword != '' :
print(keyword)
print('https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords=' + str(
keyword) + '&apikey=' + random.choice(api_list))
data = rs.get('https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords=' + str(
keyword) + '&apikey=' + random.choice(api_list))
data = data.json()
# data = pd.read_json(data)
try :
if data["bestMatches"] == [] :
raise (MyError('No financial entity with this name found in our system'))
data = json.dumps(data["bestMatches"])
data = pd.read_json(data)
data.rename(columns={'1. symbol' : 'Symbol'}, inplace=True)
data.rename(columns={'2. name' : 'Name'}, inplace=True)
data.rename(columns={'3. type' : 'Type'}, inplace=True)
data.rename(columns={'4. region' : 'Region'}, inplace=True)
data.rename(columns={'5. marketOpen' : 'Market Open'}, inplace=True)
data.rename(columns={'6. marketClose' : 'Market Close'}, inplace=True)
data.rename(columns={'7. timezone' : 'Timezone'}, inplace=True)
data.rename(columns={'8. currency' : 'Currency'}, inplace=True)
data = data[data['Region'] == 'United States']
if data.count(axis=0)['Symbol'] == 0 :
raise (MyError('No US Stocks with this name found in our system'))
data_ticker = data['Symbol'].tolist()
data_name = data['Name'].tolist()
data_type = data['Type'].tolist()
data_region = data['Region'].tolist()
new_list = []
for i in range(len(data_ticker)) :
s = data_name[i] + "----" + data_ticker[i] + "----" + data_type[i]
new_list.append(s)
new_list.insert(0, '--Select from options--')
col1, col2 = st.columns(2)
with col1 :
new_box = st.selectbox("Select from below options", new_list)
if (new_box != '--Select from options--') :
input_value = new_box.split("----")[1]
crypto_select1 = new_box.split("----")[0]
currency_select = data[data['Symbol'] == input_value]['Currency'].tolist()
currency_select1 = currency_select[0]
currency_data = pd.read_csv("physical_currency_list.csv")
documentation_path = 'answer_retention.md'
"""
See %s
""" % documentation_path
from argparse import ArgumentParser
from doctest import testfile
from numpy import cumsum, histogram
from os.path import splitext
from pandas import DataFrame, read_csv, to_numeric
from sys import argv, path
from accuracy import best_feature_classes, extract_features, student_column
from score import names, score
path.insert(0, '..')
from retention import write_pdf
retention_class_name = 'is_future_answer'
def reverse(items):
return items[::-1]
def reverse_cumulative_frequency(items):
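# For each possible answer count n (1..max), returns how many students reached at
# least n answers, i.e. a retention/survival curve.
# Worked example: items=[1, 3, 3] -> histogram counts [1, 0, 2] -> reversed cumulative sum [3, 2, 2].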
bins = range(1, max(items) + 2)
counts, binedge = histogram(items, bins=bins)
return reverse(reverse(counts).cumsum())
def retention_rates(retention_counts):
rates = []
total = float(retention_counts[0])
for count in retention_counts:
rate = count / total
rates.append(rate)
return rates
def retention_steps(retention_counts):
rates = []
previous = float(retention_counts[0])
for count in retention_counts:
rate = count / previous
rates.append(rate)
previous = float(count)
return rates
def answer_history(answers):
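# Per student (in answer order): counts how many answers still follow each answer,
# numbers the answers from 1, flags every 10th answer, and sets the retention target
# is_future_answer to False on a student's final answer.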
students = answers.groupby('student')
answers['future_answers'] = students.cumcount(ascending=False)
answers['nth'] = students.cumcount() + 1
answers['is_10th'] = False
answers.loc[answers['nth'] % 10 == 0, 'is_10th'] = True
answers[retention_class_name] = True
answers.loc[answers['future_answers'] <= 0, retention_class_name] = False
def parse_answers(answer_csv):
answers = read_csv(answer_csv)
answers['answer'] = to_numeric(answers['answer'], errors='coerce')  # convert_objects() was removed from pandas; coerce non-numeric answers to NaN
answers = answers.dropna()
return answers
def feature(answer_csv):
answers = parse_answers(answer_csv)
augment_features(answers)
return save_csv(answers, answer_csv, 'feature')
def augment_features(answers):
answer_history(answers)
def predict(answer_csv, is_augment_features, classifier_index):
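# Extracts features for the retention target class, keeps roughly the best quarter of
# them via best_feature_classes, scores the selected classifier, and for
# classifier_index 0 additionally writes the fitted classifier to a PDF via write_pdf.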
answers = parse_answers(answer_csv)
if is_augment_features:
augment_features(answers)
features, classes, feature_names = extract_features(answers, retention_class_name,
ignore_columns = [student_column, 'log', 'time', 'future_answers'])
feature_count = len(feature_names) // 4  # integer division so a whole feature count is passed on
features, classes = best_feature_classes(features, classes, feature_names,
feature_count = feature_count, is_verbose = True)
accuracy, classifier = score(features, classes, classifier_index)
result = '%s score %s features %s' % (names[classifier_index], accuracy, feature_count)
if 0 == classifier_index:
pdf = '%s.%s.pdf' % (splitext(answer_csv)[0], 'predict')
write_pdf(classifier, pdf)
result += ' pdf %s' % pdf
return result
def save_csv(answers, answer_csv, infix):
feature_csv = '%s.%s.csv' % (splitext(answer_csv)[0], infix)
answers.to_csv(feature_csv, index=False, float_format='%.3f')
return '%s\n%s' % (feature_csv, ''.join(open(feature_csv).readlines()[:10]))
def funnel(answer_csv):
answers = read_csv(answer_csv)
students = answers.groupby('student')
answer_counts = []
funnel_properties = {}
for student_id, student_group in students:
answer_count = len(student_group)
answer_counts.append(answer_count)
funnel_properties['answer_count'] = range(1, max(answer_counts) + 1)
funnel_properties['retention_count'] = reverse_cumulative_frequency(answer_counts)
funnel_properties['total_retention'] = retention_rates(funnel_properties['retention_count'])
funnel_properties['step_retention'] = retention_steps(funnel_properties['retention_count'])
funnel = DataFrame(funnel_properties)
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
# series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
# ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
# ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 8 14:26:27 2021
@author: Devineni and Sven finally merged
"""
# Necessary modules
import pandas as pd
pd.set_option('mode.chained_assignment',None)
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
pd.options.plotting.backend = "matplotlib" # NOTE: This is useful in case the plotbackend has been changed by any previously (even befor machine shut-downs).
# from statistics import mean
from tabulate import tabulate
from sqlalchemy import create_engine
from uncertainties import ufloat
from uncertainties import unumpy
from uncertainties import umath
from post_processing import CBO_ESHL
# functions to print in colour
def prRed(skk): print("\033[31;1m {}\033[00m".format(skk))      # red, bold console text
def prYellow(skk): print("\033[33;1m {}\033[00m".format(skk))   # yellow, bold console text
# The following is a general syntax to dedine a MySQL connection
engine = create_engine("mysql+pymysql://root:Password123@localhost/",pool_pre_ping=True)
### =============================================================================
### engine = create_engine("mysql+pymysql://admin:the_secure_password_4_ever@localhost/",\
### pool_pre_ping=True) # Krishna's local address
###=============================================================================
### engine = create_engine("mysql+pymysql://wojtek:Password#[email protected]/",\
### pool_pre_ping=True) # Cloud server address
class ResidenceTimeMethodError(ValueError):
def __str__(self):
return 'You need to select a valid method: iso, trapez or simpson (default)'
#%% Function shape to be fitted for infinity concentration of tau_exh
def expfitshape(x, a, b):
return a*x*np.exp(-x/b)
#%% Function to find outliers
def find_outliers(col):
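# Returns a boolean Series marking samples whose absolute z-score exceeds 3
# (outside roughly the 99.7 % band for normally distributed data).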
from scipy import stats
z = np.abs(stats.zscore(col))
idx_outliers = np.where(z>3,True,False)
return pd.Series(idx_outliers,index=col.index)
def residence_time_sup_exh(experiment='W_I_e0_ESHL', aperture_sensor = "2l", periodtime=120,
experimentname=False, plot=False,
export_sublist=False, method='simpson',
filter_maxTrel=0.25, logging=False):
"""
method:
'iso' (Default) The method described in ISO 16000-8 will be applied
however this method has a weak uncertainty analysis.
'trapez' corrected ISO 16000-8 method applying the trapezoidal method
for the interval integration and considers this in the
uncertainty evaluation.
'simpson' Applies the Simpson-Rule for the integration and consequently
considers this in the uncertainty evaluation.
filter_maxTrel:
Percentage value for the allowed deviation of the predefined
periodtime T of the devices. Only half-cycles which meet the
criterion ]T/2*(1-filter_maxTrel),T/2*(1+filter_maxTrel)[
are going to be evaluated.
"""
#%% Function import
"""Syntax to import a function from any folder. Useful if the function.py file
is in another folder other than the working folder"""
# import sys
# sys.path.append("C:/Users/Devineni/OneDrive - bwedu/4_Recirculation/python_files/")
# from Outdoor_CO2 import outdoor # This function calculates the outdoor CO2 data
experimentglo = CBO_ESHL(experiment = experiment, aperture_sensor = aperture_sensor)
global a, b, df_tau_sup, df_tau_exh
#%% Control plot properties"
"""This syntax controls the plot properties(default plot font, shape, etc),
more attributes can be added and removed depending on the requirement """
from pylab import rcParams
rcParams['figure.figsize'] = 7,4.5
plt.rcParams["font.family"] = "calibri"
plt.rcParams["font.weight"] = "normal"
plt.rcParams["font.size"] = 10
plt.close("all")
if periodtime is None:
T = 120
prYellow('ATTENTION: periodtime has not been defined. I set T=120s instead')
else:
T = periodtime
# T in s; period time of the ventilation systems push-pull devices.
# time = pd.read_excel("C:/Users/Devineni/OneDrive - bwedu/4_Recirculation/Times_thesis.xlsx", sheet_name="Timeframes")
# The dataframe time comes from the excel sheet in the path above, to make -
# - changes go to this excel sheet, edit and upload it to mysql.
lb = T/2*(1-filter_maxTrel) # lower bound of considered cycles
ub = T/2*(1+filter_maxTrel) # upper bound of considered cycles
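# With the defaults T = 120 s and filter_maxTrel = 0.25, only half-cycles whose
# duration lies between 45 s and 75 s are evaluated.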
time = pd.read_sql_query("SELECT * FROM testdb.timeframes;", con = engine)
#standard syntax to fetch a table from Mysql; In this case a table with the
# short-names of the measurements, all the start and end times, the DB-name
# of the measurement and the required table-names of the DB/schema is loaded into a dataframe.
#%% Load relevant data
t = time.index[time['short_name'].isin([experiment])==True].tolist()[0] # to select the experiment (see Timeframes.xlsx)
start = str(time["Start"][time.index[time['short_name'].isin([experiment])==True].tolist()[0]] - dt.timedelta(minutes=20))
end = str(time["End"][time.index[time['short_name'].isin([experiment])==True].tolist()[0]])
t0 = time["Start"][t]
# actual start of the experiment, out of the dataframe "time"
table = time["tables"][t].split(",") #Name of the ventilation device
try:
if aperture_sensor in table: # the chosen sensor must be one of the aperture sensors listed for this experiment
pass
else:
raise ValueError
except ValueError:
prYellow('ValueError: The sensor you selected is not an aperture sensor of the experiment. Select one of these: {}'.format(table))
return 'ValueError: The sensor you selected is not an aperture sensor of the experiment. Select one of these: {}'.format(table)
dum = [["Experiment", experiment ], ["Sensor", aperture_sensor]] # Creates a list of 2 rows filled with string tuples specifying the experiment and the sensor.
if experimentname:
print(tabulate(dum)) # Prints the inut details in a table
else:
pass
# database = time["database"][time.index[time['short_name'].isin([experiment])==True].tolist()[0]] # Selects the name of the database as a string
database = experimentglo.database
#%%% Load background data
#background, dummy = outdoor(str(t0), str(end), plot = False) # Syntax to call the background concentration function, "dummy" is only necessary since the function "outdoor" returns a tuple of a dataframe and a string.
# background = background["CO2_ppm"].mean()
background = experimentglo.aussen()['meanCO2'] # Future: implement cyclewise background concentration; Till now it takes the mean outdoor concentration of the whole experiment.
background_std = experimentglo.aussen()['sgm_CO2']
#%%% Load data of the experiment and the selected sensor
df = pd.read_sql_query("SELECT * FROM {}.{} WHERE datetime BETWEEN '{}' AND\
'{}'".format(database, aperture_sensor, start, end), con = engine)
df = df.loc[:,["datetime", "CO2_ppm"]]
df["original"] = df["CO2_ppm"] # Copies the original absolute CO2-concentrations data form CO2_ppm in a "backup"-column originals
df.columns = ["datetime", "original", "CO2_ppm"] # changes the order of the columns to the defined one
# df["original"] = df["CO2_ppm"] # Copies the original absolute CO2-concentrations data form CO2_ppm in a "backup"-column originals; this one can be deleted
df["CO2_ppm"] = df["CO2_ppm"] - background # substracts the background concentrations -> CO2_ppm contains CO2-concentration of some instance of time above background concentration.
if df["CO2_ppm"].min() < 0: # Sometimes the accumulated amount of CO2 concentraion becomes negative. This is not possible and would lead to a mistake for the integral calculation. An artificial offset lifts the whole decay curve at >=0.
offset = df["CO2_ppm"].min()
df["CO2_ppm"] = df["CO2_ppm"] - offset
df = df.loc[~df.duplicated(subset=["datetime"])] # Checks for duplicated in datetime and removed them; @Krishna: How can such a duplicate occur?
diff = (df["datetime"][1]-df["datetime"][0]).seconds # integer diff in s; Calculates the length of the time interval between two timestamps
df = df.set_index("datetime") # Resets the index of the dataframe df from the standard integer {0, 1, 2, ...} to be exchanged by the datetime column containing the timestamps.
t01 = t0
while not(t01 in df.index.to_list()): # The t0 from the Excel sheet may not be precise and the sensor may not
t01 = t01 + dt.timedelta(seconds=1) # start logging at exactly the same time, so this loop finds the
# closest timestamp in the data after the original t0
df["roll"] = df["CO2_ppm"].rolling(int(T/diff)).mean() # moving average for 2 minutes, used to calculate Cend; T = 120s is the period time of the push-pull ventilation devices which compose the ventilation system.
df["roll"] = df["roll"].fillna(method='bfill')
c0 = df["roll"].loc[t01] # C0; @DRK: Check if c0 = df["roll"].loc[t0] is better here. ## ORIGINAL: c0 = df["CO2_ppm"].loc[t0]
Cend37 = round((c0)*0.37, 2)
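# The 37 % threshold corresponds to e^-1 (~0.368), i.e. one nominal time constant of an
# ideal exponential decay, which is the stopping criterion used in the ISO 16000-8 evaluation below.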
df2 = df # @DRK: From this line 101 should be changed.
cend = df.loc[df2["roll"].le(Cend37)] # Cend: Sliced df of the part of the decay curve below the 37 percent limit
tn = df.index[-1]
if len(cend) == 0: # Syntax to find the tn of the experiment
print("The device has not reached 37% of its initial concentration")
else:
pass
#%%% Increase resolution
df = df.resample("5S").mean()
df['original'] = df['original'].interpolate(method='polynomial', limit_direction='forward',order=2)
df['CO2_ppm'] = df['CO2_ppm'].interpolate(method='polynomial', limit_direction='forward',order=2)
df['roll'] = df['roll'].interpolate(method='polynomial', limit_direction='forward',order=2)
#%%% Find max min points
from scipy.signal import argrelextrema # Calculates the relative extrema of data.
n = round(T / (2*diff)) # How many points on each side to use for the comparison to consider comparator(n, n+x) to be True.; @DRK: This value should depend on diff and T (period time of the push-pull devices). n = T / (2*diff)
df['max'] = df.iloc[argrelextrema(df['CO2_ppm'].values, np.greater_equal,\
order=n)[0]]['CO2_ppm'] # Gives all the peaks; "np.greater_equal" is a callable function which argrelextrema shall use to compare to arrays before and after the point currently evaluated by argrelextrema.
df['min'] = df.iloc[argrelextrema(df['CO2_ppm'].values, np.less_equal,\
order=n)[0]]['CO2_ppm'] # Gives all the valleys; "np.less_equal" is a callable function which argrelextrema shall use to compare to arrays before and after the point currently evaluated by argrelextrema.
#%%% Plot Original
if plot:
fig,ax = plt.subplots()
df.plot(title = "original " + experiment, color = [ 'silver', 'green', 'orange'], ax = ax)
df['max'].plot(marker='o', ax = ax) # This needs to be verified with the graph if python recognizes all peaks
df['min'].plot(marker="v", ax = ax) # - and valleys. If not adjust the n value.
else:
pass
#%%% Load data for the occupied space V3
alpha_mean, df_alpha, df_indoor = experimentglo.mean_curve()
alpha_mean_u = ufloat(alpha_mean[0], alpha_mean[1])
dfin_dCmean = df_indoor.loc[:,['mean_delta', 'std mean_delta']]
tmeancurve = dfin_dCmean.index.tolist()[0]
while not(tmeancurve in df.index.to_list()): # The t0 from the Excel sheet may not be precise, so shift to the closest timestamp available in the data
tmeancurve = tmeancurve + dt.timedelta(seconds=1)
datetime_index = pd.date_range(tmeancurve, dfin_dCmean.index.tolist()[-1], freq='5s')
dfin_dCmean = dfin_dCmean.reindex(datetime_index, method='bfill')
if t0 < dfin_dCmean.index.tolist()[0]:
mean_delta_0_room = dfin_dCmean.loc[dfin_dCmean.index.tolist()[0]]
deltat_mean = dfin_dCmean.index.tolist()[0] - t0
prYellow('ATTENTION: mean_delta_room starts {} after t0 = {}!'.format(deltat_mean, t0))
else:
mean_delta_0_room = dfin_dCmean.loc[t0]
mean_delta_0_room_u = ufloat(mean_delta_0_room[0],mean_delta_0_room[1])
#%%%%% Add mean and exhaust concentrations indoor (V3) to the dfin_dCmean
'''
mean concentrations:
Based on the calculated spatial and statistical mean air
age in the occupied space and the spatial average initial
concentration in the occupied space at t0.
'''
# count = 0
dfin_dCmean['room_av'] = pd.Series(dtype='float64')
dfin_dCmean['std room_av'] = pd.Series(dtype='float64')
dfin_dCmean['room_exh'] = pd.Series(dtype='float64')
dfin_dCmean['std room_exh'] = pd.Series(dtype='float64')
dfin_dCmean.reset_index(inplace=True)
if 'index' in dfin_dCmean.columns:
dfin_dCmean = dfin_dCmean.rename(columns={"index": "datetime"})
else:
pass
for count in range(len(dfin_dCmean)):
deltat = dfin_dCmean['datetime'][count]-t0
deltat = deltat.total_seconds()/3600
'''
mean concentrations:
Based on the calculated spatial and statistical mean air
age in the occupied space and the spatial average initial
concentration in the occupied space at t0.
'''
value = mean_delta_0_room_u*umath.exp(-1/(alpha_mean_u)*deltat)
dfin_dCmean['room_av'][count] = value.n
dfin_dCmean['std room_av'][count] = value.s
'''
exhaust concentrations:
Based on the calculated spatial and statistical mean air
age in the occupied space and the spatial average initial
concentration in the occupied space at t0.
'''
value = mean_delta_0_room_u*umath.exp(-1/(2*alpha_mean_u)*deltat)
dfin_dCmean['room_exh'][count] = value.n
dfin_dCmean['std room_exh'][count] = value.s
# count = count + 1
dfin_dCmean = dfin_dCmean.set_index('datetime')
#%%% Filter supply and exhaust phases
df.loc[df['min'] > -400, 'mask'] = False # Marks all min as False; @DRK: Why is this "-400" necessary?
df.loc[df['max'] > 0, 'mask'] = True # Marks all max as True; @DRK: This is just a back-up right? For the case I use for debugging there is no change happening for df.
df["mask"] = df["mask"].fillna(method='ffill').astype("bool") # Use forward to fill True and False
df = df.dropna(subset= ["mask"]) # In case there are NaNs left (at the beginning of the array) it drops/removes the whole time stamps/rows.
df["sup"] = df["mask"] # Create seperate columns for sup and exhaust; @DRK: Why is this necessary? At the end of these six lines of code df has 3 column {mask, sup, exh} containing all there the same data.
df["exh"] = df["mask"]
df.loc[df['min'] > 0, 'sup'] = True # The valleys have to belong to supply as well
df.loc[df['max'] > 0, 'exh'] = False # The peaks have to belong to the exhaust phase; before, the column was only a copy of the filled mask
df_sup = df.loc[df["sup"].to_list()] # Extract all the supply phases form df. Meaning only the timestamps maeked with "True" in df["sup"] are selected.
a = df_sup.resample("5S").mean() # Resampled beacuase, the time stamps are missing after slicing out the supply phases form df. The option "5S" adds the now missing time stamps again but without data. This is only necessary to plot the arrays flawlessly later in the same graphs again.
df_sup2 = a.loc[:,["CO2_ppm"]]
df_exh = df.loc[~df["exh"].values]
b = df_exh.resample("5S").mean()
df_exh2 = b.loc[:,["CO2_ppm"]]
sup_exh_df = pd.concat([dfin_dCmean, df_sup2, df_exh2], axis = 1).reset_index()
sup_exh_df.columns = ["datetime",
"meas room_av", "std meas room_av",
"calc room_av", "std calc room_av",
"calc room_exh", "std calc room_exh",
"supply", "exhaust"]
rows = sup_exh_df[~sup_exh_df['calc room_exh'].isnull()].index.tolist()
sup_exh_df['d calc exh-av'] = np.sqrt(np.power(sup_exh_df["calc room_exh"].loc[rows],2)\
- np.power(sup_exh_df["calc room_av"].loc[rows],2))
sup_exh_df['std d calc exh-av'] = np.sqrt(np.power(sup_exh_df["std calc room_exh"].loc[rows],2)\
+ np.power(sup_exh_df["std calc room_av"].loc[rows],2))
#%%%%%% Calculation of the weight factor of the current device period
ddCmax_exhav = sup_exh_df.loc[sup_exh_df['d calc exh-av'].idxmax()]
ddCmax_exhav = ddCmax_exhav.filter(['datetime','d calc exh-av','std d calc exh-av'])
#%%% Plot Matplotlib # This can be verified from this graph
# =============================================================================
# if plot:
# #%%%% supply
# plt.figure()
# a["CO2_ppm"].plot(title = "supply " + experiment)
# a["CO2_ppm"].plot(title = "supply")
#
# #%%%% exhaust
# b["CO2_ppm"].plot(title = "exhaust " + experiment) # Similar procedure is repeated from exhaust
# plt.figure()
# b["CO2_ppm"].plot(title = "exhaust") # Similar procedure is repeated from exhaust
#
# #%%%% Plot for extra prespective
# fig,ax1 = plt.subplots()
#
# df_sup.plot(y="CO2_ppm", style="yv-", ax = ax1, label = "supply")
# df_exh.plot(y="CO2_ppm", style="r^-", ax = ax1, label = "exhaust")
# else:
# pass
# =============================================================================
#%%% Plot Plotly
if plot:
pd.options.plotting.backend = "plotly" # NOTE: This changes the plot backend which should be resetted after it is not needed anymore. Otherwise it will permanently cause problems in future, since it is a permanent change.
import plotly.graph_objects as go
fig = go.Figure()
fig.add_trace(go.Scatter(#title = time["short_name"][t],
name='meas room_av',
x = sup_exh_df["datetime"],
y = sup_exh_df["meas room_av"],
#error_y=dict(value=sup_exh_df["std meas room_av"].max())
)
)
fig.add_trace(go.Scatter(name='calc room_av',
x = sup_exh_df["datetime"],
y = sup_exh_df["calc room_av"],
#error_y = dict(value=sup_exh_df["std calc room_av"].max())
)
)
fig.add_trace(go.Scatter(name='calc room_exh',
x = sup_exh_df["datetime"],
y = sup_exh_df["calc room_exh"],
#error_y=dict(value=sup_exh_df["std calc room_exh"].max())
)
)
fig.add_trace(go.Scatter(name='d calc exh-av',
x = sup_exh_df["datetime"],
y = sup_exh_df["d calc exh-av"],
#error_y=dict(value=sup_exh_df["std d calc exh-av"].max())
)
)
fig.add_trace(go.Scatter(name='supply',x=sup_exh_df["datetime"], y = sup_exh_df["supply"]))
fig.add_trace(go.Scatter(name='exhaust',x=sup_exh_df["datetime"], y = sup_exh_df["exhaust"]))
fig.update_layout(
title="{} {}".format(database, aperture_sensor),
xaxis_title="Zeit t in hh:mm:ss",
yaxis_title=r'Verweilzeit $\bar{t}_1$',
legend_title="Legende",
font=dict(
family="Segoe UI",
size=18,
color="black"
)
)
fig.show()
import plotly.io as pio
pio.renderers.default='browser'
pd.options.plotting.backend = "matplotlib" # NOTE: This is a reset and useful in case the plotbackend has been changed by any previously (even befor machine shut-downs).
else:
pass
#%% Marking dataframes supply
"""Marks every supply dataframe with a number for later anaysis """
n = 1
df_sup3 = df_sup2.copy().reset_index()
start_date = str(t0); end_date = str(tn) # CHANGE HERE
mask = (df_sup3['datetime'] > start_date) & (df_sup3['datetime'] <= end_date)
df_sup3 = df_sup3.loc[mask]
for i,j in df_sup3.iterrows():
# *.iterrows() always returns a tuple of the dataframe index (an int here) and
# a Series containing the data of the selected row. Therefore it is good to
# separate both upfront, e.g. into i, j.
try:
# print(not pd.isnull(j["CO2_ppm"]), (np.isnan(df_sup3["CO2_ppm"][i+1])))
if (not pd.isnull(j["CO2_ppm"])) and (np.isnan(df_sup3["CO2_ppm"][i+1])):
df_sup3.loc[i,"num"] = n
n = n+1
elif not pd.isnull(j["CO2_ppm"]):
df_sup3.loc[i,"num"] = n
except KeyError:
pass
# print("ignore the key error")
#%%%% Export a file with all the supply curves sorted in a matrix for an Excel diagram
df_sup_list = []
dummy_df = pd.DataFrame(columns=['datetime', 'CO2_ppm', 'num'])
for i in range(1, int(df_sup3['num'].max()+1)):
try:
if export_sublist and len(df_sup3.loc[df_sup3["num"]==i]) > 3:
dummy_df = dummy_df.append(df_sup3.loc[df_sup3["num"]==(i)])
dummy_df = dummy_df.rename(columns = {'CO2_ppm':'CO2_ppm_{}'.format(i)})
except KeyError:
pass
# print("ignore the key error")
df_sup_list.append(df_sup3.loc[df_sup3["num"]==i])
del dummy_df["num"]
if logging:
dummy_df.to_csv(r'D:\Users\sauerswa\wichtige Ordner\sauerswa\Codes\Python\Recirculation\export\df_sup_{}_{}.csv'.format(database, aperture_sensor), index=True)
#%%% Supply tau
# This method can be replicated in Excel for cross-reference
"""Calculates tau based in ISO 16000-8"""
if (database == "cbo_summer") or (database == "cbo_winter") or (database == "eshl_winter"):
engine1 = create_engine("mysql+pymysql://root:Password123@localhost/{}".format("cbo_calibration"),pool_pre_ping=True)
# engine = create_engine("mysql+pymysql://root:@172.16.17.32/{}".format("cbo_calibration"),pool_pre_ping=True)
elif database == "eshl_summer":
engine1 = create_engine("mysql+pymysql://root:Password123@localhost/{}".format("eshl_calibration"),pool_pre_ping=True)
# engine = create_engine("mysql+pymysql://root:@172.16.17.32/{}".format("eshl_calibration"),pool_pre_ping=True)
else:
print("Please select a correct database")
# self.cdf1 = pd.read_sql_query("SELECT * FROM {}.{} WHERE datetime BETWEEN '{}' AND '{}'".format(self.database, self.table, self.t0, self.tn), con = self.engine)
# self.cdf2 = self.cdf1.loc[:,["datetime", "CO2_ppm"]]
reg_result = pd.read_sql_table("reg_result", con = engine1).drop("index", axis = 1)
'''Calibration data for the particular sensor alone is filtered '''
global res
res = reg_result[reg_result['sensor'].str.lower() == aperture_sensor].reset_index(drop = True) # Contains the sensor calibration data and especially the calibration curve.
accuracy1 = 50 # it comes from the equation of uncertainity for testo 450 XL
accuracy2 = 0.02 # ±(50 ppm CO2 ±2% of mv)(0 to 5000 ppm CO2 )
accuracy3 = 50 # the same equation for second testo 450 XL
accuracy4 = 0.02
accuracy5 = 75 # # the same equation for second tes
accuracy6 = 0.03 # Citavi Title: Testo AG
df_tau_sup = []
s_rel_start = 1-df_sup_list[0].reset_index()['CO2_ppm'].loc[0]/mean_delta_0_room['mean_delta']
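# Relative deviation of the first supply concentration from the spatial room
# mean; it is used further below as an additional relative uncertainty term in
# s_total.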
s_cyc = 0
for idf in df_sup_list:
if len(idf) > 3:
a = idf.reset_index(drop = True) # Overwrite the dummy dataframe "a" with the currently chosen supply decay curve.
a['CO2_ppm_reg'] = a.eval(res.loc[0, "equation"]) # See: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.eval.html?highlight=pandas%20dataframe%20eval#pandas.DataFrame.eval
a = a.rename(columns = {'CO2_ppm':'CO2_ppm_original', 'CO2_ppm_reg': 'CO2_ppm'})
a = a.drop_duplicates(subset=['datetime'])
a = a.loc[:, ["datetime", "CO2_ppm_original", "CO2_ppm"]]
diff = (a["datetime"][1] - a["datetime"][0]).seconds
a["runtime"] = np.arange(0,len(a) * diff, diff)
lena = a["runtime"].iloc[-1]
if a["runtime"].iloc[-1]>T/2*(1-filter_maxTrel) and \
a["runtime"].iloc[-1]<T/2*(1+filter_maxTrel):
if logging:
prYellow("Since: {} < {} < {}, I consider the supply cycle {}".format(lb, lena, ub, s_cyc))
### Calculating the measurement uncertainty based on the uncertainties of the reference sensors and the deviation of the sensor during its calibration
a["s_meas"] = np.sqrt(np.square((a["CO2_ppm"] * accuracy2))
+ np.square(accuracy1) + np.square((a["CO2_ppm"] * accuracy4))
+ np.square(accuracy3) + np.square((a["CO2_ppm"] * accuracy6))
+ np.square(accuracy5)+ np.square(res.loc[0, "rse"])
+ np.square(background_std))
ns_meas = a['s_meas'].mean()
n = len(a['s_meas'])
global sa_num, s_lambda, s_phi_e
global area_sup, s_rest, s_total, a_rest, a_tot,sa_num,s_lambda, s_phi_e,s_rest, sa_rest, s_area
a = a.dropna()
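# Truncate the decay curve at roughly 1/e (36 %) of its initial concentration
# before the log-linear evaluation of the remaining tail.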
a = a[a["CO2_ppm"] >= a["CO2_ppm"].iloc[0]*0.36]
a["log"] = np.log(a["CO2_ppm"])
a = a.dropna()
### ISO 16000-8 option to calculate slope (defined to be calculated by spreadsheet/Excel)
a["t-te"] = a["runtime"] - a["runtime"][len(a)-1]
a["lnte/t"] = a["log"][len(a)-1] - a["log"] # @DRK: The slope (as defined in ISO 16000-8) was always negative since the two subtrahend where in the wrong order.
a["slope"] = a["lnte/t"] / a["t-te"]
try:
if method=='iso':
slope = a["slope"].mean()
sumconz = a["CO2_ppm"].iloc[1:-1].sum()
area_sup = (diff * (a["CO2_ppm"][0]/2 + sumconz +a["CO2_ppm"][len(a)-1]/2))
print('ATTENTION: The ISO 16000-8 method has a weak uncertainty evaluation; consider using the trapezoidal method, which corrects this.')
elif method=='trapez':
### More accurate option to calculate the slope of each (sub-)curve
x1 = a["runtime"].values
y1 = a["log"].values
from scipy.stats import linregress
slope = -linregress(x1,y1)[0]
from numpy import trapz
area_sup = trapz(a["CO2_ppm"].values, dx=diff) # proof that both methods have same answer: area_sup_2 = area_sup_1
print('ATTENTION: The trapezoidal method is used in ISO 16000-8 and is here also considered in the uncertainty evaluation. However, more precise results are obtained with the Simpson rule.')
elif method=='simpson':
### More accurate option to calculate the slope of each (sub-)curve
x1 = a["runtime"].values
y1 = a["log"].values
from scipy.stats import linregress
slope = -linregress(x1,y1)[0]
from scipy.integrate import simpson
area_sup = simpson(a["CO2_ppm"].values, dx=diff, even='first') # proof that both methods have same answer: area_sup_2 = area_s
else:
raise ResidenceTimeMethodError
except ResidenceTimeMethodError as err:
print(err)
a.loc[[len(a)-1], "slope"] = slope
# tail = a["CO2_ppm"][len(a)-1]/slope
a_rest = a["CO2_ppm"].iloc[-1]/slope
a_tot = area_sup + a_rest
tau = a_tot/a["CO2_ppm"][0]
a["tau_sec"] = tau
try:
if method=='iso':
# Taken from DIN ISO 16000-8:2008-12, Equation D2 units are cm3.m-3.sec
sa_num = ns_meas * (diff) * ((n - 1)/np.sqrt(n))
# The uncertainty of the summed trapezoidal method itself is not covered by ISO 16000-8.
sa_tm = 0
elif method=='trapez':
# Actually sa_num (the propagated uncertainty of the measurement) should be calculated this way
sa_num = (diff) * ns_meas * np.sqrt((2*n-1)/2*n)
# Additionally, the summed trapezoidal method itself has an uncertainty as well.
sa_tm = diff**2/12*(a["runtime"].loc[len(a)-1]-a["runtime"][0])*a["CO2_ppm"][0]/tau**2
elif method=='simpson':
# Actually sa_num (the propagated uncertainty of the measurement) should be calculated this way
sa_num = 1/3*diff*ns_meas*np.sqrt(2+20*round(n/2-0.5))
# Additionally, the Simpson method itself has an uncertainty as well.
sa_tm = diff**4/2880*(a["runtime"].loc[len(a)-1]-a["runtime"][0])*a["CO2_ppm"][0]/tau**4
else:
raise ResidenceTimeMethodError
except ResidenceTimeMethodError as err:
print(err)
s_lambda = a["slope"][:-1].std()/abs(a["slope"][:-1].mean())
s_phi_e = a["slope"][:-1].std()/slope
s_rest = np.sqrt(pow(s_lambda,2) + pow(s_phi_e,2))
sa_rest = s_rest * a_rest
s_area = np.sqrt(pow(sa_num,2) + pow(sa_tm,2) + pow(sa_rest,2))/a_tot
s_total = np.sqrt(pow(s_area,2) + pow(s_rel_start,2))
a.loc[:, "s_total"] = s_total*tau
#%%%%% Calculate weighting factor
sup_exh_df = sup_exh_df.set_index('datetime')
dfslice = sup_exh_df[a["datetime"][0]:a["datetime"][len(a)-1]]
dfslice = dfslice.filter(['d calc exh-av', 'std d calc exh-av'])
a = a.set_index('datetime')
a = pd.concat([a, dfslice], axis = 1).reset_index()
del dfslice
from scipy.integrate import simpson
area_weight = simpson(a["d calc exh-av"].values, dx=diff, even='first')
# Actually sa_num (the propagated uncertainty of the measurement) should be calculated this way
saw_num = 1/3*diff*np.mean(a["std d calc exh-av"])*np.sqrt(2+20*round(n/2-0.5))
# Additionally, the Simpson method itself has an uncertainty as well.
saw_tm = diff**4/2880*(a["runtime"].loc[len(a)-1]-a["runtime"][0])*ddCmax_exhav["d calc exh-av"]
saw = np.sqrt(pow(saw_num,2) + pow(saw_tm,2))
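# The weighting factor below is the time-averaged difference between the
# calculated exhaust and room-average concentrations over this cycle,
# normalised by the maximum of that difference during the experiment, so it
# lies between 0 and 1.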
area_weight = ufloat(area_weight, saw)
weight = area_weight/(ufloat(ddCmax_exhav["d calc exh-av"],ddCmax_exhav["std d calc exh-av"])*a['runtime'].iloc[-1])
a.loc[:, "weight"] = weight.n
a.loc[:, "std weight"] = weight.s
a.loc[:, "Cycle"] = s_cyc
sup_exh_df.reset_index(inplace=True)
#%%%%% Summarise
df_tau_sup.append(a)
else:
if logging:
prRed("Since the supply cycle {} has a runtime of {} s it is outside [{}, {}]".format(s_cyc, lena, lb, ub))
pass
s_cyc = s_cyc + 1
else:
pass
#%%%% Supply tau from step-down curves
cyclnr_sup = []
tau_list_sup = []
stot_list_sup = []
weight_list_sup = []
saw_list_sup = []
for jdf in df_tau_sup:
cyclnr_sup.append(jdf["Cycle"][0])
tau_list_sup.append(jdf["tau_sec"][0])
stot_list_sup.append(jdf["s_total"][0])
weight_list_sup.append(jdf["weight"][0])
saw_list_sup.append(jdf["std weight"][0])
df_tau_s = pd.DataFrame({'Cycle':cyclnr_sup,
'tau_sup':tau_list_sup,
'std tau_sup':stot_list_sup,
'weight':weight_list_sup,
'std weight':saw_list_sup})
# Filter outliers (see https://medium.com/@stevenewmanphotography/eliminating-outliers-in-python-with-z-scores-dd72ca5d4ead)
df_tau_s['outliers'] = find_outliers(df_tau_s['tau_sup'])
df_tau_s = df_tau_s[df_tau_s['outliers']==False]
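# For reference, a minimal z-score based sketch of what the find_outliers()
# helper could look like (assumption: the actual helper is defined elsewhere in
# this module and may use a different threshold):
#
# def find_outliers(series, threshold=3):
#     z_scores = (series - series.mean()) / series.std()
#     return z_scores.abs() > threshold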
'''
The weighting factor is less important for the supply phases, because the
residence times here are mostly normally distributed over the phases.
It can therefore be set low, which means that almost all calculated
residence times will be considered. The valid range for cfac_s is 0 to 1.
Values >= 1 automatically trigger that only the residence times with the
highest weighting factors are chosen.
'''
cfac_s = 0.2
df_tau_s2 = df_tau_s[df_tau_s['weight']>cfac_s]
if len(df_tau_s2) == 0:
df_tau_s2 = df_tau_s.nlargest(10, 'weight')
tau_list_sup_u = unumpy.uarray(df_tau_s2['tau_sup'],df_tau_s2['std tau_sup'])
weight_list_sup_u = unumpy.uarray(df_tau_s2['weight'],df_tau_s2['std weight'])
# Mean supply phase residence time
tau_s_u = sum(tau_list_sup_u*weight_list_sup_u)/sum(weight_list_sup_u)
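# Note: the unumpy arrays propagate the standard deviations through this
# weighted mean by first-order (linear) error propagation.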
# df_tau_s = pd.DataFrame({'nom' : [], 'std' : []})
# count = 0
# while (count < len(tau_list_sup_u)):
# df_tau_s.loc[count,['nom']] = tau_list_sup_u[count].n
# df_tau_s.loc[count,['std']] = tau_list_sup_u[count].s
# count = count + 1
#%%%%% Plot: residence times of the step-down curves during supply-phase
if plot:
import plotly.io as pio
pio.renderers.default='browser'
pd.options.plotting.backend = "matplotlib"
#######################################################################
pd.options.plotting.backend = "plotly"
import plotly.io as pio
pio.renderers.default='browser'
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# Create figure with secondary y-axis
fig2 = make_subplots(specs=[[{"secondary_y": True}]])
fig2.add_trace(go.Scatter(name='Verweilzeit',
x = df_tau_s['Cycle'],
y = df_tau_s['tau_sup'],
error_y=dict(value=df_tau_s['std tau_sup'].max())
),
secondary_y=False,
)
fig2.add_trace(go.Scatter(name='Gewichtung',
x = df_tau_s['Cycle'],
y = df_tau_s['weight'],
error_y=dict(value=df_tau_s['std weight'].max())
),
secondary_y=True,
)
fig2.update_layout(
title="Zuluft",
xaxis_title="Zyklusnummer",
yaxis_title=r'Verweilzeit $\bar{t}_1$',
legend_title="Legende",
font=dict(
family="Segoe UI",
size=18,
color="black"
)
)
fig2.show()
import plotly.io as pio
pio.renderers.default='browser'
pd.options.plotting.backend = "matplotlib"
#%% Marking dataframes exhaust
"""Marks every exhaust dataframe with a number for later anaysis """
n = 1
df_exh3 = df_exh2.copy().reset_index()
mask = (df_exh3['datetime'] > start_date) & (df_exh3['datetime'] <= end_date)
df_exh3 = df_exh3.loc[mask]
for i,j in df_exh3.iterrows():
try:
# print(not pd.isnull(j["CO2_ppm"]), (np.isnan(df_exh3["CO2_ppm"][i+1])))
if (not pd.isnull(j["CO2_ppm"])) and (np.isnan(df_exh3["CO2_ppm"][i+1])):
df_exh3.loc[i,"num"] = n
n = n+1
elif (not pd.isnull(j["CO2_ppm"])):
df_exh3.loc[i,"num"] = n
except KeyError:
pass
# print("ignore the key error")
#%%%% Export a file with all the exhaust curves sorted in a matrix for an Excel diagram
df_exh_list = []
del dummy_df
dummy_df = pd.DataFrame(columns=['datetime', 'CO2_ppm', 'num'])
for i in range(1, int(df_exh3.num.max()+1)):
try:
if export_sublist and len(df_exh3.loc[df_exh3["num"]==i]) > 3:
dummy_df = dummy_df.append(df_exh3.loc[df_exh3["num"]==(i)])
dummy_df = dummy_df.rename(columns = {'CO2_ppm':'CO2_ppm_{}'.format(i)})
except KeyError:
pass
# print("ignore the key error")
df_exh_list.append(df_exh3.loc[df_exh3["num"]==i])
del dummy_df["num"]
if logging:
dummy_df.to_csv(r'D:\Users\sauerswa\wichtige Ordner\sauerswa\Codes\Python\Recirculation\export\df_exh_{}_{}.csv'.format(database, aperture_sensor), index=True)
#%%% Exhaust tau
# this method can be replicated in Excel for cross-verification
#%%%% Calculates tau based in area under the curve
df_tau_exh = []
e_cyc = 0
for e in df_exh_list:
if len(e) > 3:
# %%%%% Structure columns
b = e.reset_index(drop = True) # Overwrite the dummy dataframe "b" with the currently chosen exhaust step-up curve.
b['CO2_ppm_reg'] = b.eval(res.loc[0, "equation"]) # See: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.eval.html?highlight=pandas%20dataframe%20eval#pandas.DataFrame.eval
b = b.rename(columns = {'CO2_ppm':'CO2_ppm_original', 'CO2_ppm_reg': 'CO2_ppm'})
b = b.drop_duplicates(subset=['datetime'])
b = b.loc[:, ["datetime", "CO2_ppm_original", "CO2_ppm"]]
b = b.dropna()
diff = (b["datetime"][1] - b["datetime"][0]).seconds
b["runtime"] = np.arange(0,len(b) * diff, diff)
lenb = b["runtime"].iloc[-1]
if b["runtime"].iloc[-1]>T/2*(1-filter_maxTrel) and b["runtime"].iloc[-1]<T/2*(1+filter_maxTrel):
if logging:
prYellow("Since: {} < {} < {}, I consider the exhaust cycle {}".format(lb, lenb, ub, e_cyc))
# %%%%% Calculating the measurement uncertainty
'''
based on the uncertainties of the reference sensors and the
deviation of the sensor during its calibration
'''
b["std CO2_ppm"] = np.sqrt(np.square((b["CO2_ppm"] * accuracy2))
+ np.square(accuracy1) + np.square((b["CO2_ppm"] * accuracy4))
+ np.square(accuracy3) + np.square((b["CO2_ppm"] * accuracy6))
+ np.square(accuracy5)+ np.square(res.loc[0, "rse"]))
#%%%%% Add mean concentrations indoor (V3) to the exhaust dataframe
# '''
# mean concentrations:
# mean concentration over all measurements from
# senors in the occupied space (V3)
# '''
# b = b.set_index('datetime')
# index_list = b.index.tolist()
# count = 0
# while (count < len(index_list)):
# index_list[count] = index_list[count].strftime("%Y-%m-%d %H:%M:%S")
# count = count + 1
# b.loc[:,['mean_delta', 'std mean_delta']] = dfin_dCmean.loc[index_list]
# b.reset_index(inplace=True)
# b['delta_C_mean'] = b['mean_delta'][0] - b['CO2_ppm']
# # b = b.rename(columns = {'CO2_ppm_original':'CO2_ppm_original',
# # 'CO2_ppm': 'CO2_ppm_reg',
# # 'mean_delta': 'indoor_mean',
# # 'delta_C': 'CO2_ppm'})
#%%%%% Add mean concentrations indoor (V3) to the exhaust dataframe
# '''
# mean concentrations:
# Based on the calculated spatial and statistical mean air
# age in the occupied space and the spacial average initial
# concentration in the occupied space at t0.
# '''
# count = 0
# b['room_av'] = pd.Series(dtype='float64')
# b['std room_av'] = pd.Series(dtype='float64')
# while (count < len(b['datetime'])):
# value = mean_delta_0_room_u*unumpy.exp(-1/(alpha_mean_u)*((b['datetime'][count]-t0).total_seconds()/3600))
# b['room_av'][count] = value.n
# b['std room_av'][count] = value.s
# count = count + 1
#%%%%% Add exhaust concentrations indoor (V3) to the exhaust dataframe
'''
exhaust concentrations:
Based on the calculated spatial and statistical mean air
age in the occupied space and the spatial average initial
concentration in the occupied space at t0.
'''
count = 0
b['room_exh'] = pd.Series(dtype='float64')
b['std room_exh'] = pd.Series(dtype='float64')
while (count < len(b['datetime'])):
value = mean_delta_0_room_u*umath.exp(-1/(2*alpha_mean_u)*\
((b['datetime'][count]-t0).total_seconds()/3600))
b['room_exh'][count] = value.n
b['std room_exh'][count] = value.s
count = count + 1
#%%%%% Concentration level after infinit time
'''
A step-up concentration curve approaches a certain maximum
after infinite time. This concentration results from the
exhaust concentration of the room and therefore from the
residence time of V3.
'''
dC3e = sum(unumpy.uarray(b['room_exh'],b['std room_exh']))/\
len(unumpy.uarray(b['room_exh'],b['std room_exh']))
#%%%%% Calculate Delta C between exhaust of V3 and exhaust of V2
'''
'''
count = 0
b['dC 2e3e exh'] = pd.Series(dtype='float64')
b['std dC 2e3e exh'] = pd.Series(dtype='float64')
while (count < len(b['datetime'])):
dC_2e_u = ufloat(b["CO2_ppm"][count],b["std CO2_ppm"][count])
dC_3e_u = ufloat(b["room_exh"][count],b["std room_exh"][count])
value = dC_3e_u - dC_2e_u
b['dC 2e3e exh'][count] = value.n
b['std dC 2e3e exh'][count] = value.s
count = count + 1
#%%%%% Calculation of the logarithmic concentration curves
b = b.dropna()
b = b[b["dC 2e3e exh"] >= b["dC 2e3e exh"].iloc[0]*0.36]
b["log"] = np.log(b["dC 2e3e exh"])
b["std log"] = b['std dC 2e3e exh']/b["dC 2e3e exh"]
b = b.dropna()
#%%%%% Rename columns to usual nomenclature
b = b.rename(columns = {'CO2_ppm':'CO2_ppm_reg', 'dC 2e3e exh':'CO2_ppm', 'std dC 2e3e exh': 's_meas'})
#%%%%% Start of integral calculation
# ### Calculating the measurement uncertainty based on the uncertainties of the reference sensors and the deviation of the sensor during its calibration
# b["s_meas"] = np.sqrt(np.square((b["CO2_ppm"] * accuracy2))
# + np.square(accuracy1) + np.square((a["CO2_ppm"] * accuracy4))
# + np.square(accuracy3) + np.square((a["CO2_ppm"] * accuracy6))
# + np.square(accuracy5)+ np.square(res.loc[0, "rse"]))
ns_meas = b['s_meas'].mean()
n = len(b['s_meas'])
# the following parameters have already been set global
# global sa_num, s_lambda, s_phi_e
# global area_sup, s_rest, s_total, a_rest, a_tot,sa_num,s_lambda, s_phi_e,s_rest, sa_rest, s_area
### ISO 16000-8 option to calculate slope (defined to be calculated by spreadsheet/Excel)
b["t-te"] = b["runtime"] - b["runtime"].iloc[len(b)-1]
b["lnte/t"] = b["log"].iloc[len(b)-1] - b["log"] # @DRK: The slope (as defined in ISO 16000-8) was always negative since the two subtrahend where in the wrong order.
b["slope"] = b["lnte/t"] / b["t-te"]
try:
if method=='iso':
slope = b["slope"].mean()
sumconz = b["CO2_ppm"].iloc[1:-1].sum()
area_sup = (diff * (b["CO2_ppm"][0]/2 + sumconz + b["CO2_ppm"][len(b)-1]/2))
print('ATTENTION: The ISO 16000-8 method has a weak uncertainty evaluation; consider using the trapezoidal method, which corrects this.')
elif method=='trapez':
### More accurate option to calculate the slope of each (sub-)curve
x1 = b["runtime"].values
y1 = b["log"].values
from scipy.stats import linregress
slope = -linregress(x1,y1)[0]
from numpy import trapz
area_sup = trapz(b["CO2_ppm"].values, dx=diff) # proof that both methods have same answer: area_sup_2 = area_sup_1
print('ATTENTION: The trapezoidal method is used in ISO 16000-8 and is here also considered in the uncertainty evaluation. However, more precise results are obtained with the Simpson rule.')
elif method=='simpson':
### More accurate option to calculate the slope of each (sub-)curve
x1 = b["runtime"].values
y1 = b["log"].values
from scipy.stats import linregress
slope = -linregress(x1,y1)[0]
from scipy.integrate import simpson
area_sup = simpson(b["CO2_ppm"].values, dx=diff, even='first') # proof that both methods have same answer: area_sup_2 = area_s
else:
raise ResidenceTimeMethodError
except ResidenceTimeMethodError as err:
print(err)
b["slope"].iloc[len(b)-1] = slope
# tail = a["CO2_ppm"][len(a)-1]/slope
a_rest = b["CO2_ppm"].iloc[-1]/slope
a_tot = area_sup + a_rest
tau2 = a_tot/dC3e.n
b["tau_sec"] = tau2
try:
if method=='iso':
# Taken from DIN ISO 16000-8:2008-12, Equation D2 units are cm3.m-3.sec
sa_num = ns_meas * (diff) * ((n - 1)/np.sqrt(n))
# The uncertainty of the summed trapezoidal method itself is not covered by ISO 16000-8.
sa_tm = 0
elif method=='trapez':
# Actually sa_num (the propagated uncertainty of the measurement) should be calculated this way
sa_num = (diff) * ns_meas * np.sqrt((2*n-1)/2*n)
# Additionally, the summed trapezoidal method itself has an uncertainty as well.
sa_tm = diff**2/12*(b["runtime"].iloc[len(b)-1]-b["runtime"].iloc[0])*b["CO2_ppm"].iloc[0]/tau2**2
elif method=='simpson':
# Actually sa_num (the propagated uncertainty of the measurement) should be calculated this way
sa_num = 1/3*diff*ns_meas*np.sqrt(2+20*round(n/2-0.5))
# Additionally, the Simpson method itself has an uncertainty as well.
sa_tm = diff**4/2880*(b["runtime"].iloc[len(b)-1]-b["runtime"].iloc[0])*b["CO2_ppm"].iloc[0]/tau2**4
else:
raise ResidenceTimeMethodError
except ResidenceTimeMethodError as err:
print(err)
s_lambda = b["slope"][:-1].std()/abs(b["slope"][:-1].mean())
s_phi_e = b["slope"][:-1].std()/slope
s_rest = np.sqrt(pow(s_lambda,2) + pow(s_phi_e,2))
sa_rest = s_rest * a_rest
s_area = np.sqrt(pow(sa_num,2) + pow(sa_tm,2) + pow(sa_rest,2))/a_tot
# ATTENTION: s_total is a relative uncertainty!
s_total = np.sqrt(pow(s_area,2) + pow(dC3e.s/dC3e.n,2))
b.loc[:, "s_total"] = s_total*tau2
#%%%%% Calculate weighting factor
sup_exh_df = sup_exh_df.set_index('datetime')
dfslice = sup_exh_df[b["datetime"].iloc[0]:b["datetime"].iloc[len(b)-1]]
dfslice = dfslice.filter(['d calc exh-av', 'std d calc exh-av'])
b = b.set_index('datetime')
b = pd.concat([b, dfslice], axis = 1).reset_index()
del dfslice
from scipy.integrate import simpson
area_weight = simpson(b["d calc exh-av"].values, dx=diff, even='first')
# Actually sa_num (the propagated uncertainty of the measurement) should be calculated this way
saw_num = 1/3*diff*np.mean(b["std d calc exh-av"])*np.sqrt(2+20*round(n/2-0.5))
# Additionally, the Simpson method itself has an uncertainty as well.
saw_tm = diff**4/2880*(b["runtime"].iloc[len(b)-1]-b["runtime"].iloc[0])*ddCmax_exhav["d calc exh-av"]
saw = np.sqrt(pow(saw_num,2) + pow(saw_tm,2))
area_weight = ufloat(area_weight, saw)
weight = area_weight/(ufloat(ddCmax_exhav["d calc exh-av"],ddCmax_exhav["std d calc exh-av"])*b['runtime'].iloc[-1])
b.loc[:, "weight"] = weight.n
b.loc[:, "std weight"] = weight.s
b.loc[:, "Cycle"] = e_cyc
sup_exh_df.reset_index(inplace=True)
#%%%%% Summarise
df_tau_exh.append(b)
else:
if logging:
prRed("Since the exhaust cycle {} has a runtime of {} s it is outside [{}, {}]".format(e_cyc, lenb, lb, ub))
pass
e_cyc = e_cyc + 1
else:
pass
#%%%% Exhaust tau from step-up curves
cyclnr_exh = []
tau_list_exh = []
stot_list_exh = []
weight_list_exh = []
saw_list_exh = []
for jdf in df_tau_exh:
cyclnr_exh.append(jdf["Cycle"][0])
tau_list_exh.append(jdf["tau_sec"][0])
stot_list_exh.append(jdf["s_total"][0])
weight_list_exh.append(jdf["weight"][0])
saw_list_exh.append(jdf["std weight"][0])
df_tau_e = pd.DataFrame({'Cycle':cyclnr_exh,
'tau_exh':tau_list_exh,
'std tau_exh':stot_list_exh,
'weight':weight_list_exh,
'std weight':saw_list_exh})
# Filter outliers (see https://medium.com/@stevenewmanphotography/eliminating-outliers-in-python-with-z-scores-dd72ca5d4ead)
df_tau_e['outliers'] = find_outliers(df_tau_e['tau_exh'])
df_tau_e = df_tau_e[df_tau_e['outliers']==False]
'''
From the plots further below one can clearly see that the residence time
of the exhaust phases increases with the number of cycles of the
measurement. This is because over time there are fewer and fewer marked
fluid elements with low residence times in the system, since they have
already been washed out. The remaining marked (tracer-laden) elements are
those which have been stagnating or recirculated up to the current period.
As a consequence, the residence time of V2 will approach the residence
time of V3 after infinite time.
The actual mean residence time of V2 is neither the one measured in
the first period nor the one after infinite time.
Since it is already known that the residence time of the exhaust phases
approaches the residence time of V3, the infinite-time concentration
which the step-up curves approach is the exhaust concentration
of V3. However, as just noted, the residence times increase from
one cycle to the next. For the tracer measurements this means
that the infinite-time concentration of a step-up curve should lie at
least between the average concentration in the room and the calculated
exhaust concentration of V3. Subtracting the average concentration
over time from the exhaust concentration over time gives a function
with one maximum. Around this maximum the driving concentration
difference between exhaust air and room-average air is maximal, and so
is the distinction between exhaust air and average room air.
A cfac_e close to 1 selects the calculated residence times from the
evaluated cycles around this maximum.
'''
cfac_e = 0.99
df_tau_e2 = df_tau_e[df_tau_e['weight']>cfac_e]
if len(df_tau_e2) == 0:
df_tau_e2 = df_tau_e.nlargest(10, 'weight')
tau_list_exh_u = unumpy.uarray(df_tau_e2['tau_exh'],df_tau_e2['std tau_exh'])
weight_list_exh_u = unumpy.uarray(df_tau_e2['weight'],df_tau_e2['std weight'])
tau_e_u = sum(tau_list_exh_u*weight_list_exh_u)/sum(weight_list_exh_u)
#%%%%% Plot: residence times of the step-up curves during exhaust-phase
if plot:
import plotly.io as pio
pio.renderers.default='browser'
pd.options.plotting.backend = "matplotlib"
#######################################################################
pd.options.plotting.backend = "plotly"
import plotly.io as pio
pio.renderers.default='browser'
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# Create figure with secondary y-axis
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(go.Scatter(name='Verweilzeit',
x = df_tau_e['Cycle'],
y = df_tau_e['tau_exh'],
error_y=dict(value=df_tau_e['std tau_exh'].max())
),
secondary_y=False,
)
fig.add_trace(go.Scatter(name='Gewichtung',
x = df_tau_e['Cycle'],
y = df_tau_e['weight'],
error_y=dict(value=df_tau_e["std weight"].max())
),
secondary_y=True,
)
fig.update_layout(
title="Abluft",
xaxis_title="Zyklusnummer",
yaxis_title=r'Verweilzeit $\bar{t}_2$',
legend_title="Legende",
font=dict(
family="Segoe UI",
size=18,
color="black"
)
)
fig.show()
import plotly.io as pio
pio.renderers.default='browser'
pd.options.plotting.backend = "matplotlib"
#%%%% Calculating the period number expected to deliver the mean residence time of V2
'''
From the plots above one can clearly see that the residence time of the
exhaust phases increases with the number of cycles of the measurement.
This is because over time there are fewer and fewer marked fluid
elements with low residence times in the system, since they have
already been washed out. The remaining marked (tracer-laden) elements are
those which have been stagnating or recirculated up to the current period.
As a consequence, the residence time of V2 will approach the residence
time of V3 after infinite time.
The actual mean residence time of V2 is neither the one measured in
the first period nor the one after infinite time. The "mean index"
of the period representing the best value for the residence time of V2
has to be calculated by a procedure similar to the one used for the
residence times themselves.
Since it is already known that the residence time of the exhaust phases
approaches the residence time of V3, the infinite-time concentration
which the step-up curves approach is the exhaust concentration
of V3. However, as just noted, the residence times increase from
one cycle to the next. For the tracer measurements this means
that the infinite-time concentration of a step-up curve should lie at
least between the average concentration in the room and the calculated
exhaust concentration of V3. Subtracting the average concentration
over time from the exhaust concentration over time gives a function
which carries information about the size of the interval of possible
concentrations fulfilling this criterion.
'''
# df_indexm_exh = []
#%%%%% Calculate Delta tau between exhaust of tau_3 and and exhaust of tau_2
# '''
# '''
# count = 0
# df_tau_e['dtau 2e3e exh'] = pd.Series(dtype='float64')
# df_tau_e['std dtau 2e3e exh'] = pd.Series(dtype='float64')
# while (count < len(b['datetime'])):
# dtau_2e_u = ufloat(df_tau_e["nom"][count],df_tau_e["std"][count])
# value = 2*alpha_mean_u*3600 - dtau_2e_u
# df_tau_e['dtau 2e3e exh'][count] = value.n
# df_tau_e['std dtau 2e3e exh'][count] = value.s
# count = count + 1
# #%%%%% Calculation of the logarithmic concentration curves
# df_tau_e["log"] = np.log(df_tau_e['dtau 2e3e exh'])
# df_tau_e["std log"] = df_tau_e['std dtau 2e3e exh']/df_tau_e['dtau 2e3e exh']
# df_tau_e = df_tau_e.dropna()
# #%%%%% Start of integral calculation
# diff = 1
# ns_meas = df_tau_e['std dtau 2e3e exh'].mean()
# n = len(df_tau_e['std dtau 2e3e exh'])
# # Because the evaluation of the residence times t0 and t0 --> Will be considered differently
# #df_tau_e['index'] = df_tau_e['index'] + fdelay
# df_tau_e['index'] = np.arange(0,len(df_tau_e) * diff, diff)
# ### ISO 16000-8 option to calculate slope (defined to be calculated by Spread-Sheat/Excel)
# df_tau_e["i-ie"] = df_tau_e['index'] - df_tau_e['index'][len(df_tau_e)-1]
# df_tau_e["lnie/i"] = df_tau_e["log"][len(df_tau_e)-1] - df_tau_e["log"] # @DRK: The slope (as defined in ISO 16000-8) was always negative since the two subtrahend where in the wrong order.
# df_tau_e["slope"] = df_tau_e["lnie/i"] / df_tau_e["i-ie"]
# ### More acurate option to calculate the solpe of each (sub-)curve
# x1 = df_tau_e['index'].values
# y1 = df_tau_e["log"].values
# from scipy.stats import linregress
# slope = -linregress(x1,y1)[0]
# from scipy.integrate import simpson
# area_sup = simpson(df_tau_e["dtau 2e3e exh"].values, dx=diff, even='first') # proof that both methods have same answer: area_sup_2 = area_s
# df_tau_e.loc[[len(b)-1], "slope"] = slope
# # tail = a["CO2_ppm"][len(a)-1]/slope
# a_rest = df_tau_e["dtau 2e3e exh"].iloc[-1]/slope
# a_tot = area_sup + a_rest
# indexm2 = a_tot/(2*alpha_mean_u.n*3600)
# df_tau_e["indexm2"] = indexm2
# # Actually sa_num (the propagated uncertainty of the measurement) should be calculated this way
# sa_num = 1/3*diff*ns_meas*np.sqrt(2+20*round(n/2-0.5))
# # Aditionally the summed trapezoidal method itself has an uncertainty as well.
# sa_tm = diff**4/2880*(df_tau_e['index'].loc[len(df_tau_e)-1]-df_tau_e['index'][0])*df_tau_e["dtau 2e3e exh"][0]/indexm2**4
# s_lambda = df_tau_e["slope"][:-1].std()/abs(df_tau_e["slope"][:-1].mean())
# s_phi_e = df_tau_e["slope"][:-1].std()/slope
# s_rest = np.sqrt(pow(s_lambda,2) + pow(s_phi_e,2))
# sa_rest = s_rest * a_rest
# s_area = np.sqrt(pow(sa_num,2) + pow(sa_tm,2) + pow(sa_rest,2))/a_tot
# s_total = np.sqrt(pow(s_area,2) + pow(df_tau_e["std dtau 2e3e exh"][0]/df_tau_e["dtau 2e3e exh"][0],2))
# df_tau_e.loc[:, "s_total"] = s_total
# df_tau_exh.append(b)
#%% returned values
"""
Returns:
t0 = initial timestamp of the start of the experiment
tn = final timestamp of the evaluated data
tau_e = exhaust residence time of the short-cut volume
tau_s = exhaust residence time of the recirculation volume
("supply residence time")
"""
return [database, experiment, aperture_sensor, t0, tn, 2*alpha_mean_u,
tau_e_u, df_tau_e,
tau_s_u, df_tau_s]
#%% residence_Vflow_weighted
def residence_Vflow_weighted(vflow = pd.DataFrame([[30, 60], [5, 10]],
columns=['vol flow', 'std vol flow'],
dtype=('float64')),
resitime = pd.DataFrame([[64, 45], [5, 10]],
columns=['rtime', 'std rtime'],
dtype=('float64'))
):
from uncertainties import unumpy
try:
if len(vflow) == len(resitime):
resitime_u = unumpy.uarray(resitime['rtime'], resitime['std rtime'])
vflow_u = unumpy.uarray(vflow['vol flow'],vflow['std vol flow'])
resitime_u = sum(resitime_u*vflow_u)/sum(vflow_u)
resitime = pd.DataFrame(columns=['rtime', 'std rtime'],
dtype=('float64'))
resitime = pd.DataFrame([{'rtime': resitime_u.n, 'std rtime': resitime_u.s}],dtype=('float64'))
else:
string = 'ValueError: The number of passed volume flows and residence times has to be equal.'
raise ValueError
pass
except ValueError:
prYellow(string)
return resitime
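# A minimal usage sketch (hypothetical numbers; one volume flow and one
# residence time per opening, e.g. as collected by summary_resitime_vflow()):
#
# vflows = pd.DataFrame({'vol flow': [30.0, 60.0], 'std vol flow': [5.0, 10.0]})
# rtimes = pd.DataFrame({'rtime': [64.0, 45.0], 'std rtime': [5.0, 10.0]})
# weighted = residence_Vflow_weighted(vflows, rtimes)
# # -> one-row DataFrame with the volume-flow weighted 'rtime' and 'std rtime'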
#%% Summarise_vflows
def Summarise_vflows(experiment = "W_I_e0_Herdern"):
experimentglo = CBO_ESHL(experiment = experiment)
dvdt = pd.DataFrame(columns=('experiment','volume_flow','volume_flow_std','level',
'vdot_sup','vdot_sup_std','vdot_exh','vdot_exh_std'))
try:
if 'eshl' in experimentglo.database:
try:
if experimentglo.experiment[2] == 'I':
level = ['Kü_100', 'SZ01_100', 'SZ02_100', 'WZ_100']
for count in range(len(level)):
dvdt = dvdt.append(experimentglo.volume_flow(level_eshl = level[count]), ignore_index=True)
try:
if experimentglo.experiment[4:6] == 'e0':
pass
else:
string1 = 'Only those cases with balanced volume flow settings are yet covered by Summarise_vflows().'
raise ValueError
except ValueError:
prYellow(string1)
elif experimentglo.experiment[2] == 'H':
level = ['Kü_20', 'SZ01_20', 'SZ02_20', 'WZ_20']
for count in range(len(level)):
dvdt = dvdt.append(experimentglo.volume_flow(level_eshl = level[count]), ignore_index=True)
try:
if experimentglo.experiment[4:6] == 'e0':
pass
else:
string2 = 'Only those cases with balanced volume flow settings are yet covered by Summarise_vflows().'
raise ValueError
except ValueError:
prYellow(string2)
pass
else:
string3 = 'CBO_ESHL.experiment has the wrong syntax. The 3rd string element must be "I" for "intensive ventilation" or "H" for "humidity protection".'
raise NameError
pass
except NameError:
prYellow(string3)
pass
elif 'cbo' in experimentglo.database:
try:
if experimentglo.experiment[2] == 'I':
level = ['K1_St5', 'K2_St5', 'SZ_St5']
for count in range(len(level)):
dvdt = dvdt.append(experimentglo.volume_flow(level_cbo = level[count]), ignore_index=True)
try:
if experimentglo.experiment[4:6] == 'e0':
pass
else:
string4 = 'Only those cases with balanced volume flow settings are yet covered by Summarise_vflows().'
raise ValueError
except ValueError:
prYellow(string4)
elif experimentglo.experiment[2] == 'H':
level = ['K1_St4', 'K2_St4', 'SZ_St4']
for count in range(len(level)):
dvdt = dvdt.append(experimentglo.volume_flow(level_cbo = level[count]), ignore_index=True)
try:
if experimentglo.experiment[4:6] == 'e0':
pass
else:
string5 = 'ValueError: Only those cases with balanced volume flow settings are yet covered by Summarise_vflows().'
raise ValueError
except ValueError:
prYellow(string5)
pass
else:
string6 = 'NameError: CBO_ESHL.experiment has the wrong syntax. The 3rd string element must be "I" for "intensive ventilation" or "H" for "humidity protection".'
raise NameError
pass
except NameError:
prYellow(string6)
pass
else:
string7 = 'NameError: The current CBO_ESHL.database is not valid. Volume flows cannot be returned by CBO_ESHL.volume_flow().'
raise NameError
pass
except NameError:
prYellow(string7)
return dvdt
#%% Summarise_resitimes
def Summarise_resitimes(experiment = "W_I_e0_Herdern"):
experimentglo = CBO_ESHL(experiment = experiment)
time = pd.read_sql_query("SELECT * FROM testdb.timeframes;", con = engine)
#standard syntax to fetch a table from Mysql; In this case a table with the
# short-names of the measurements, all the start and end times, the DB-name
# of the measurement and the required table-names of the DB/schema is loaded into a dataframe.
t = time["timeframes_id"][time.index[time['short_name'].isin([experiment])==True].tolist()[0]]-1
table = time["tables"][t].split(",") #Name of the ventilation device
resitime = pd.DataFrame(index=range(len(table)),
columns=('Sensor',
'av restime_3 in h','std av restime_3 in h',
'av restime_2 in s','std av restime_2 in s',
'av restime_1 in s','std av restime_1 in s'))
for i in range(len(table)):
df = residence_time_sup_exh(experiment=experiment, aperture_sensor = table[i],
periodtime=120,
experimentname=True, plot=False,
export_sublist=False, method='simpson',
filter_maxTrel=0.25, logging=False)
resitime.loc[i] = pd.Series({'Sensor':table[i],
'av restime_3 in h': df[5].n,'std av restime_3 in h': df[5].s,
'av restime_2 in s': df[6].n,'std av restime_2 in s': df[6].s,
'av restime_1 in s': df[8].n,'std av restime_1 in s': df[8].s})
return resitime
#%% check_for_nan
def check_for_nan(numbers = {'set_of_numbers': [1,2,3,4,5,np.nan,6,7,np.nan,8,9,10,np.nan]}):
import pandas as pd
import numpy as np
df = pd.DataFrame(numbers,columns=['set_of_numbers'])
check_for_nan = df['set_of_numbers'].isnull().values.any()
print (check_for_nan)
#%% summary_resitime_vflow
def summary_resitime_vflow(experiment = "W_I_e0_Herdern", reset=False):
import pandas as pd
import pickle as pk
import os.path
experimentglo = CBO_ESHL(experiment = experiment)
try:
if reset:
with open(experiment + "_summary", "wb") as file_summary:
summary = [Summarise_vflows(experiment = experiment),
Summarise_resitimes(experiment = experiment)]
pk.dump(summary, file_summary)
pass
elif os.path.exists(experiment + "_summary"):
with open(experiment + "_summary", "rb") as file_summary:
summary = pk.load(file_summary)
pass
else:
with open(experiment + "_summary", "wb") as file_summary:
summary = [Summarise_vflows(experiment = experiment),
Summarise_resitimes(experiment = experiment)]
pk.dump(summary, file_summary)
string3 = 'No file "{}_summary" found. "summary" has been recreated and saved as "{}_summary".'.format(experiment, experiment)
pass
except IOError:
prYellow(string3)
finally:
file_summary.close()
try:
if os.path.exists(experiment + "_summary_final"):
with open(experiment + "_summary_final", "rb") as file_summary:
summary = pk.load(file_summary)
pass
else:
with open(experiment + "_summary_final", "wb") as file_summary:
try:
if experiment == (summary[0]['experiment'].loc[:]).all():
volume_flow = summary[0]['volume_flow'].loc[0]
std_volume_flow = summary[0]['volume_flow_std'].loc[0]
av_resitime_3_h = summary[1]['av restime_3 in h'].loc[0]
std_av_resitime_3_h = summary[1]['std av restime_3 in h'].loc[0]
del summary[0]['experiment'], summary[0]['volume_flow'], summary[0]['volume_flow_std']
del summary[1]['av restime_3 in h'], summary[1]['std av restime_3 in h']
summary[0] = summary[0].set_index('level')
summary[1] = summary[1].set_index('Sensor')
summary.insert(0, experiment)
summary.insert(1, experimentglo.volume())
summary.insert(2, pd.DataFrame([{'volume_flow': volume_flow,
'std_volume_flow': std_volume_flow}]))
summary.insert(3, pd.DataFrame([{'av restime_3 in h': av_resitime_3_h,
'std av restime_3 in h': std_av_resitime_3_h}]))
pass
else:
string1 = 'ValueError: summary_resitime_vflow() received wrong data.'
raise ValueError
except ValueError:
prYellow(string1)
try:
if 'eshl' in experimentglo.database:
relation = pd.DataFrame(data={'Level':['SZ01_100', 'SZ02_100', 'Kü_100', 'WZ_100','SZ01_20', 'SZ02_20', 'Kü_20', 'WZ_20'],
'Sensor': ['1l', '2l', '3l_kü', '3l_wz', '1l', '2l', '3l_kü', '3l_wz']
})
pass
elif 'cbo' in experimentglo.database:
relation = pd.DataFrame(data={'Level':['K1_St4', 'K1_St4', 'K2_St4', 'SZ_St4', 'K1_St5', 'K1_St5', 'K2_St5', 'SZ_St5'],
'Sensor': ['1l','1l_sub','2l','3l','1l','1l_sub','2l', '3l']
})
pass
else:
string2 = 'NameError: The current CBO_ESHL.database is not valid. Volume flows cannot be returned by CBO_ESHL.summary_resitime_vflow().'
raise NameError
pass
except NameError:
prYellow(string2)
relation = pd.MultiIndex.from_frame(relation)
summary[4] = summary[4].reindex(index=relation, level=0)
summary[5] = summary[5].reindex(index=relation, level=1)
summary.insert(6, pd.concat([summary[4], summary[5]],
join="outer", axis=1))
summary[6] = summary[6].dropna()
# del summary[3], summary[4]
#%%% Local residence time dataframes
supplyt = summary[6].loc[:,['av restime_1 in s', 'std av restime_1 in s']]
supplyt = supplyt.reset_index()
del supplyt['Level'], supplyt['Sensor']
supplyt.rename(columns = {'av restime_1 in s':'rtime', 'std av restime_1 in s':'std rtime'}, inplace = True)
exhaustt = summary[6].loc[:,['av restime_2 in s', 'std av restime_2 in s']]
exhaustt = exhaustt.reset_index()
del exhaustt['Level'], exhaustt['Sensor']
exhaustt.rename(columns = {'av restime_2 in s':'rtime', 'std av restime_2 in s':'std rtime'}, inplace = True)
#%%% Local volume flow dataframes
supplyV = summary[6].loc[:,['vdot_sup', 'vdot_sup_std']]
supplyV = supplyV.reset_index()
del supplyV['Level'], supplyV['Sensor']
supplyV.rename(columns = {'vdot_sup':'vol flow', 'vdot_sup_std':'std vol flow'}, inplace = True)
exhuastV = summary[6].loc[:,['vdot_exh', 'vdot_exh_std']]
exhuastV = exhuastV.reset_index()
del exhuastV['Level'], exhuastV['Sensor']
exhuastV.rename(columns = {'vdot_exh':'vol flow', 'vdot_exh_std':'std vol flow'}, inplace = True)
#%%% Calculating the weighted residence times for the whole system
summary.insert(7,residence_Vflow_weighted(supplyV, supplyt))
summary[7].rename(columns = {'rtime':'av t1 in s', 'std rtime':'std av t1 in s'}, inplace = True)
summary.insert(8,residence_Vflow_weighted(exhuastV, exhaustt))
summary[8].rename(columns = {'rtime':'av t2 in s', 'std rtime':'std av t2 in s'}, inplace = True)
#%%% Calculating the short-cut volume V2
tav2 = summary[8]['av t2 in s'].loc[0] # residence time short-cut volume, in s
tav2_std = summary[8]['std av t2 in s'].loc[0]
Vdt23 = summary[2]['volume_flow'].loc[0] # effective volume flow of the ventilation device, in m³/h
Vdt23_std = summary[2]['std_volume_flow'].loc[0]
V23 = summary[1]['Volume V23 in m³'].loc[0] # volume of the ventilated space, in m³
V23_std = summary[1]['std Volume V23 in m³'].loc[0]
alphaav3 = summary[3]['av restime_3 in h'].loc[0]/2 # average air age in the ventilated space, in h
alphaav3_std = summary[3]['std av restime_3 in h'].loc[0]/2
V2 = short_cut_volume(tav2 = tav2, tav2_std = tav2_std,
Vdt23 = Vdt23, Vdt23_std = Vdt23_std,
V23 = V23, V23_std = V23_std,
alphaav3 = alphaav3, alphaav3_std = alphaav3_std)
summary[1] = pd.concat([summary[1], V2],join="outer", axis=1)
#%%% Remaining volume V3 containing the occupied space
V23 = summary[1]['Volume V23 in m³'].loc[0] # volume of the ventilated space, in m³
V23_std = summary[1]['std Volume V23 in m³'].loc[0]
V2 = summary[1]['short-cut volume V2 in m³'].loc[0] # volume of the ventilated space, in m³
V2_std = summary[1]['std short-cut volume V2 in m³'].loc[0]
V3 = occupied_volume(V23, V23_std, V2, V2_std)
summary[1] = pd.concat([summary[1], V3],join="outer", axis=1)
#%%% Volume flow circulating through the volume of the occupied space
V3 = summary[1]['occupied volume V3 in m³'].loc[0] # volume of the ventilated space, in m³
V3_std = summary[1]['std occupied volume V3 in m³'].loc[0]
alphaav3 # residence time of the occupied space, in h
alphaav3_std
Vdt3 = occupied_volumeflow(V3, V3_std, alphaav3, alphaav3_std)
summary[2] = | pd.concat([summary[2], Vdt3],join="outer", axis=1) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 2 15:31:43 2020
@author: afo
"""
import pandas as pd
import os
from os import listdir
from os.path import abspath, isfile, join
from inspect import getsourcefile
import numpy as np
p = abspath(getsourcefile(lambda:0))
p = p.rsplit('/', 1)[0]
os.chdir(p)
print('Working Directory is: %s' % os.getcwd())
# Function calculates mean ratios across all companies of interest. This is to be used as a soft benchmark
def get_means():
# list of each file with financials
files = [f for f in listdir(p + '/annual_financials_tech') if isfile(join(p + '/annual_financials_tech', f))]
try :
files.remove('.DS_Store')
except ValueError:
print()
# lists for each of the ratios
eps = []
epsGr = []
pe = []
pb = []
ps = []
divPyr = []
divYield = []
roe = []
roa = []
opIncR = []
opIncGr = []
netPrMar = []
cashR = []
currentR = []
de = []
da = []
intCov = []
asTurn = []
payTurn = []
asGr = []
fcfGr = []
peg = []
i = 0
# Main loop for going through each file and extracting ratios
for i in range(0, len(files)):
xls = pd.ExcelFile( p + '/annual_financials_tech/' + files[i])
#xls = pd.ExcelFile( p + '/annual_financials_tech/MSFT.xlsx')
balance_sheet = pd.read_excel(xls, 'BS', index_col=0)
income_statement = pd.read_excel(xls, 'IS', index_col=0)
cash_flow = | pd.read_excel(xls, 'CF', index_col=0) | pandas.read_excel |
"""
Copyright (c) 2021, FireEye, Inc.
Copyright (c) 2021 <NAME>
"""
import os
# noinspection PyUnresolvedReferences,PyPackageRequirements
import ember
import joblib
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectFromModel
from sklearn.preprocessing import KBinsDiscretizer
from mw_backdoor import ember_feature_utils, constants
# FEATURES
def load_features(feats_to_exclude, dataset='ember', selected=False, vrb=False):
""" Load the features and exclude those in list.
:param feats_to_exclude: (list) list of features to exclude
:param dataset: (str) name of the dataset being used
:param selected: (bool) if true load only Lasso selected features for Drebin
:param vrb: (bool) if true print debug strings
:return: (dict, array, dict, dict) feature dictionaries
"""
if dataset == 'ember':
feature_names = np.array(ember_feature_utils.build_feature_names())
non_hashed = ember_feature_utils.get_non_hashed_features()
hashed = ember_feature_utils.get_hashed_features()
elif dataset == 'pdf' or dataset == 'ogcontagio':
feature_names, non_hashed, hashed = load_pdf_features()
elif dataset == 'drebin':
feature_names, non_hashed, hashed, feasible = load_drebin_features(feats_to_exclude, selected)
else:
raise NotImplementedError('Dataset {} not supported'.format(dataset))
feature_ids = list(range(feature_names.shape[0]))
# The `features` dictionary will contain only numerical IDs
features = {
'all': feature_ids,
'non_hashed': non_hashed,
'hashed': hashed
}
name_feat = dict(zip(feature_names, feature_ids))
feat_name = dict(zip(feature_ids, feature_names))
if dataset != 'drebin':
feasible = features['non_hashed'].copy()
for u_f in feats_to_exclude:
feasible.remove(name_feat[u_f])
features['feasible'] = feasible
if vrb:
print(
'Total number of features: {}\n'
'Number of non hashed features: {}\n'
'Number of hashed features: {}\n'
'Number of feasible features: {}\n'.format(
len(features['all']),
len(features['non_hashed']),
len(features['hashed']),
len(features['feasible'])
)
)
print('\nList of non-hashed features:')
print(
['{}: {}'.format(f, feat_name[f]) for f in features['non_hashed']]
)
print('\nList of feasible features:')
print(
['{}: {}'.format(f, feat_name[f]) for f in features['feasible']]
)
return features, feature_names, name_feat, feat_name
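# A minimal usage sketch (illustrative only; with an empty exclusion list all
# non-hashed features remain feasible):
#
# features, feature_names, name_feat, feat_name = load_features(
#     feats_to_exclude=[], dataset='ember', vrb=True)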
def load_pdf_features():
""" Load the PDF dataset feature list
:return: (ndarray) array of feature names for the pdf dataset
"""
arbitrary_feat = [
'author_dot',
'keywords_dot',
'subject_dot',
'author_lc',
'keywords_lc',
'subject_lc',
'author_num',
'keywords_num',
'subject_num',
'author_oth',
'keywords_oth',
'subject_oth',
'author_uc',
'keywords_uc',
'subject_uc',
'createdate_ts',
'moddate_ts',
'title_dot',
'createdate_tz',
'moddate_tz',
'title_lc',
'creator_dot',
'producer_dot',
'title_num',
'creator_lc',
'producer_lc',
'title_oth',
'creator_num',
'producer_num',
'title_uc',
'creator_oth',
'producer_oth',
'version',
'creator_uc',
'producer_uc'
]
feature_names = np.load('saved_files/pdf_features.npy')
non_hashed = [np.searchsorted(feature_names, f) for f in sorted(arbitrary_feat)]
hashed = list(range(feature_names.shape[0]))
hashed = list(set(hashed) - set(non_hashed))
return feature_names, non_hashed, hashed
def build_feature_names(dataset='ember'):
""" Return the list of feature names for the specified dataset.
:param dataset: (str) dataset identifier
:return: (list) list of feature names
"""
features, feature_names, name_feat, feat_name = load_features(
feats_to_exclude=[],
dataset=dataset
)
return feature_names.tolist()
def load_drebin_features(infeas, selected=False):
""" Return the list of Drebin features.
Due to the huge number of features we will use the vectorizer file saved
during the preprocessing.
:return:
"""
prefixes = {
'activity': 'manifest',
'api_call': 'code',
'call': 'code',
'feature': 'manifest',
'intent': 'manifest',
'permission': 'manifest',
'provider': 'manifest',
'real_permission': 'code',
'service_receiver': 'manifest',
'url': 'code'
}
vec_file = os.path.join(constants.DREBIN_DATA_DIR, 'vectorizer.pkl')
s_feat_file = os.path.join(constants.DREBIN_DATA_DIR, 's_feat_sel.npy')
# Check if the vectorizer file is available, otherwise create it
if not os.path.isfile(vec_file):
load_drebin_dataset(selected=selected)
if selected and not os.path.isfile(s_feat_file):
load_drebin_dataset(selected=selected)
vectorizer = joblib.load(vec_file)
feature_names = np.array(sorted(list(vectorizer.vocabulary_.keys())))
if selected:
s_f = np.load(s_feat_file)
feature_names = feature_names[s_f]
n_f = feature_names.shape[0]
feasible = [i for i in range(n_f) if feature_names[i].split('::')[0] not in infeas]
hashed = [i for i in range(n_f) if prefixes[feature_names[i].split('::')[0]] == 'code']
non_hashed = [i for i in range(n_f) if prefixes[feature_names[i].split('::')[0]] == 'manifest']
return feature_names, non_hashed, hashed, feasible
# DATA SETS
def load_dataset(dataset='ember', selected=False):
if dataset == 'ember':
x_train, y_train, x_test, y_test = load_ember_dataset(True)
elif dataset == 'ogcontagio':
x_train, y_train, x_test, y_test = load_pdf_dataset()
elif dataset == 'drebin':
x_train, y_train, x_test, y_test = load_drebin_dataset(selected)
else:
raise NotImplementedError('Dataset {} not supported'.format(dataset))
return x_train, y_train, x_test, y_test
# noinspection PyBroadException
def load_ember_dataset(binarized=False):
""" Return train and test data from EMBER.
:return: (array, array, array, array)
"""
# Perform feature vectorization only if necessary.
try:
if not binarized:
x_train, y_train, x_test, y_test = ember.read_vectorized_features(
constants.EMBER_DATA_DIR,
feature_version=1
)
else:
x_train = np.load(os.path.join(constants.EMBER_DATA_DIR, "x_train.npy"))
x_test = np.load(os.path.join(constants.EMBER_DATA_DIR, "x_test.npy"))
y_train = np.load(os.path.join(constants.EMBER_DATA_DIR, "y_train.npy"))
y_test = np.load(os.path.join(constants.EMBER_DATA_DIR, "y_test.npy"))
print("load binarized=%s" % str(binarized))
except:
ember.create_vectorized_features(
constants.EMBER_DATA_DIR,
feature_version=1
)
x_train, y_train, x_test, y_test = ember.read_vectorized_features(
constants.EMBER_DATA_DIR,
feature_version=1
)
x_train = x_train.astype(dtype='float64')
x_test = x_test.astype(dtype='float64')
# Get rid of unknown labels
x_train = x_train[y_train != -1]
y_train = y_train[y_train != -1]
x_test = x_test[y_test != -1]
y_test = y_test[y_test != -1]
return x_train, y_train, x_test, y_test
def load_pdf_dataset():
mw_file = 'ogcontagio_mw.npy'
gw_file = 'ogcontagio_gw.npy'
# Load malicious
mw = np.load(
# os.path.join(constants.SAVE_FILES_DIR, mw_file),
os.path.join('data/', mw_file),
allow_pickle=True
).item()
mwdf = pd.DataFrame(mw)
mwdf = mwdf.transpose()
mwdf['class'] = [True] * mwdf.shape[0]
mwdf.index.name = 'filename'
mwdf = mwdf.reset_index()
train_mw, test_mw = train_test_split(mwdf, test_size=0.4, random_state=42)
# Load benign
gw = np.load(
# os.path.join(constants.SAVE_FILES_DIR, gw_file),
os.path.join('data/', gw_file),
allow_pickle=True
).item()
gwdf = pd.DataFrame(gw)
gwdf = gwdf.transpose()
gwdf['class'] = [False] * gwdf.shape[0]
gwdf.index.name = 'filename'
gwdf = gwdf.reset_index()
train_gw, test_gw = train_test_split(gwdf, test_size=0.4, random_state=42)
# Merge dataframes
train_df = | pd.concat([train_mw, train_gw]) | pandas.concat |
import pandas as pd
import numpy as np
import argparse
from pathlib import Path
import json
from sklearn.preprocessing import RobustScaler
from flaml.default import greedy
from flaml.default.regret import load_result, build_regret
regret_bound = 0.01
def config_predictor_tuple(tasks, configs, meta_features, regret_matrix):
"""Config predictor represented in tuple.
The returned tuple consists of (meta_features, preferences, proc).
Returns:
meta_features_norm: A dataframe of normalized meta features, each column for a task.
preferences: A dataframe of sorted configuration indices by their performance per task (column).
regret_matrix: A dataframe of the configuration(row)-task(column) regret matrix.
"""
# pre-processing
scaler = RobustScaler()
meta_features_norm = meta_features.loc[tasks] # this makes a copy
meta_features_norm.loc[:, :] = scaler.fit_transform(meta_features_norm)
proc = {
"center": scaler.center_.tolist(),
"scale": scaler.scale_.tolist(),
}
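# The scaler parameters are stored so the same robust normalisation can be
# re-applied to an unseen task's meta-features at prediction time.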
# best model for each dataset in training
# choices = regret_matrix[tasks].loc[configs].reset_index(drop=True).idxmin()
# break ties using the order in configs
regret = (
regret_matrix[tasks]
.loc[configs]
.reset_index(drop=True)
.apply(lambda row: row.apply(lambda x: (x, row.name)), axis=1)
)
print(regret)
preferences = np.argsort(regret, axis=0)
print(preferences)
return (meta_features_norm, preferences, proc)
def build_portfolio(meta_features, regret, strategy):
"""Build a portfolio from meta features and regret matrix.
Args:
meta_features: A dataframe of metafeatures matrix.
regret: A dataframe of regret matrix.
strategy: A str of the strategy, one of ("greedy", "greedy-feedback").
"""
assert strategy in ("greedy", "greedy-feedback")
if strategy == "greedy":
portfolio = greedy.construct_portfolio(regret, None, regret_bound)
elif strategy == "greedy-feedback":
portfolio = greedy.construct_portfolio(regret, meta_features, regret_bound)
if "default" not in portfolio and "default" in regret.index:
portfolio += ["default"]
return portfolio
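# A minimal usage sketch (assumes `regret` and `meta_features` DataFrames with
# the shapes described in the docstring above):
#
# portfolio = build_portfolio(meta_features, regret, strategy="greedy")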
def load_json(filename):
"""Returns the contents of json file filename."""
with open(filename, "r") as f:
return json.load(f)
def _filter(preference, regret):
"""Remove choices after default or have NaN regret."""
try:
last = regret.index.get_loc("default") # len(preference) - 1
preference = preference[: preference[preference == last].index[0] + 1]
except KeyError: # no "default"
pass
finally:
regret = regret.reset_index(drop=True)
preference = preference[regret[preference].notna().to_numpy()]
# regret = regret[preference].reset_index(drop=True)
# dup = regret[regret.duplicated()]
# if not dup.empty:
# # break ties using the order in configs
# unique = dup.drop_duplicates()
# for u in unique:
# subset = regret == u
# preference[subset].sort_values(inplace=True)
# # raise ValueError(preference)
return preference.tolist()
def serialize(configs, regret, meta_features, output_file, config_path):
"""Store to disk all information FLAML-metalearn needs at runtime.
configs: names of model configs
regret: regret matrix
meta_features: task metafeatures
output_file: filename
config_path: path containing config json files
"""
output_file = Path(output_file)
# delete if exists
try:
output_file.unlink()
except FileNotFoundError:
pass
meta_features_norm, preferences, proc = config_predictor_tuple(
regret.columns, configs, meta_features, regret
)
portfolio = [load_json(config_path.joinpath(m + ".json")) for m in configs]
regret = regret.loc[configs]
from flaml import __version__
meta_predictor = {
"version": __version__,
"meta_feature_names": list(meta_features.columns),
"portfolio": portfolio,
"preprocessing": proc,
"neighbors": [
{"features": tuple(x), "choice": _filter(preferences[y], regret[y])}
for x, y in zip(
meta_features_norm.to_records(index=False), preferences.columns
)
],
"configsource": list(configs),
}
with open(output_file, "w+") as f:
json.dump(meta_predictor, f, indent=4)
return meta_predictor
# def analyze(regret_matrix, meta_predictor):
# tasks = regret_matrix.columns
# neighbors = meta_predictor["neighbors"]
# from sklearn.neighbors import NearestNeighbors
# nn = NearestNeighbors(n_neighbors=1)
# for i, task in enumerate(neighbors):
# other_tasks = [j for j in range(len(neighbors)) if j != i]
# # find the nn and the regret
# nn.fit([neighbors[j]["features"] for j in other_tasks])
# dist, ind = nn.kneighbors(
# np.array(task["features"]).reshape(1, -1), return_distance=True
# )
# ind = other_tasks[int(ind.item())]
# choice = int(neighbors[ind]["choice"][0])
# r = regret_matrix.iloc[choice, i]
# if r > regret_bound:
# label = "outlier"
# else:
# label = "normal"
# print(tasks[i], label, tasks[ind], "dist", dist, "regret", r)
# # find the best model and the regret
# regrets = regret_matrix.iloc[other_tasks, i]
# best = regrets.min()
# if best > regret_bound:
# print(tasks[i], "best_regret", best, "task", regrets.idxmin())
def main():
parser = argparse.ArgumentParser(description="Build a portfolio.")
parser.add_argument(
"--strategy", help="One of {greedy, greedy-feedback}", default="greedy"
)
parser.add_argument("--input", help="Input path")
parser.add_argument("--metafeatures", help="CSV of task metafeatures")
parser.add_argument("--exclude", help="One task name to exclude (for LOO purposes)")
parser.add_argument("--output", help="Location to write portfolio JSON")
parser.add_argument("--task", help="Task to merge portfolios", default="binary")
parser.add_argument(
"--estimator",
help="Estimators to merge portfolios",
default=["lgbm", "xgboost"],
nargs="+",
)
args = parser.parse_args()
meta_features = | pd.read_csv(args.metafeatures, index_col=0) | pandas.read_csv |
# coding: utf-8
# In[1]:
# get_ipython().magic(u'matplotlib inline')
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import numpy.random as npr
from sklearn.cluster import KMeans
from scipy.stats import invgamma
from scipy import sparse, stats
# plt.style.use('ggplot')
# In[2]:
# import seaborn as sns
# sns.set_style("white")
# sns.set_context("paper")
# color_names = ["red",
# "windows blue",
# "medium green",
# "dusty purple",
# "orange",
# "amber",
# "clay",
# "pink",
# "greyish",
# "light cyan",
# "steel blue",
# "forest green",
# "pastel purple",
# "mint",
# "salmon",
# "dark brown"]
# colors = sns.xkcd_palette(color_names)
# In[3]:
DATA_DIR = '../dat/raw/Webscope_R3'
# In[4]:
OUT_DATA_DIR = '../dat/proc/R3_wg'
# ## R3
# In[5]:
tr_vd_data = pd.read_csv(os.path.join(DATA_DIR, 'ydata-ymusic-rating-study-v1_0-train.txt'), sep="\t", header=None,
names=['userId', 'songId', 'rating'],engine="python")
test_data = pd.read_csv(os.path.join(DATA_DIR, 'ydata-ymusic-rating-study-v1_0-test.txt'), sep="\t", header=None,
names=['userId', 'songId', 'rating'],engine="python")
# In[6]:
tr_vd_data.head(), tr_vd_data.shape
# In[7]:
test_data.head(), test_data.shape
# In[8]:
def split_train_test_proportion(data, uid, test_prop=0.5, random_seed=0):
data_grouped_by_user = data.groupby(uid)
tr_list, te_list = list(), list()
np.random.seed(random_seed)
for u, (_, group) in enumerate(data_grouped_by_user):
n_items_u = len(group)
if n_items_u >= 5:
idx = np.zeros(n_items_u, dtype='bool')
idx[np.random.choice(n_items_u, size=int(test_prop * n_items_u), replace=False).astype('int64')] = True
tr_list.append(group[np.logical_not(idx)])
te_list.append(group[idx])
else:
tr_list.append(group)
if u % 5000 == 0:
print("%d users sampled" % u)
sys.stdout.flush()
data_tr = pd.concat(tr_list)
data_te = pd.concat(te_list)
return data_tr, data_te
# In[9]:
def get_count(tp, id):
playcount_groupbyid = tp[[id]].groupby(id, as_index=False)
count = playcount_groupbyid.size()
return count
# In[10]:
user_activity = get_count(tr_vd_data, 'userId')
item_popularity = get_count(tr_vd_data, 'songId')
# In[11]:
unique_uid = user_activity.index
unique_sid = item_popularity.index
# In[12]:
n_users = len(unique_uid)
n_items = len(unique_sid)
# In[13]:
n_users, n_items
# In[14]:
song2id = dict((sid, i) for (i, sid) in enumerate(unique_sid))
user2id = dict((uid, i) for (i, uid) in enumerate(unique_uid))
# In[15]:
# for the test set, only keep the users/items from the training set
test_data = test_data.loc[test_data['userId'].isin(unique_uid)]
test_data = test_data.loc[test_data['songId'].isin(unique_sid)]
# In[16]:
with open(os.path.join(OUT_DATA_DIR, 'unique_uid.txt'), 'w') as f:
for uid in unique_uid:
f.write('%s\n' % uid)
with open(os.path.join(OUT_DATA_DIR, 'unique_sid.txt'), 'w') as f:
for sid in unique_sid:
f.write('%s\n' % sid)
# # Turn userId and songId to 0-based index
# In[17]:
def numerize(tp):
uid = list(map(lambda x: user2id[x], tp['userId']))
sid = list(map(lambda x: song2id[x], tp['songId']))
tp.loc[:, 'uid'] = uid
tp.loc[:, 'sid'] = sid
return tp[['uid', 'sid', 'rating']]
# In[18]:
tr_vd_data = numerize(tr_vd_data)
test_data = numerize(test_data)
# In[19]:
train_data, vad_data = split_train_test_proportion(tr_vd_data, 'uid', test_prop=0.6, random_seed=12345)
obs_test_data, vad_data = split_train_test_proportion(vad_data, 'uid', test_prop=0.5, random_seed=12345)
# In[20]:
print("There are total of %d unique users in the training set and %d unique users in the entire dataset" % (len( | pd.unique(train_data['uid']) | pandas.unique |
from fattails import metrics
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
class TestMad:
"""Test the mean absolute deviation method"""
def test_example(self):
x = [0, 5, -5, 0, 0]
mad_ = metrics.mad(x)
expected_mad = 2
assert mad_ == expected_mad
def test_handles_mad_of_zero(self):
x = [1, 1, 1, 1, 1]
x = np.array(x)
mad_ = metrics.mad(x)
assert mad_ == 0
@pytest.mark.parametrize("description, input_data, expected_output", [
("duplicate_values", [2, 2, 3], [0.75, 0.5, 0.25]),
("negative_values", [-1, -0.3, 7], [0.75, 0.5, 0.25]),
("not_sorted_values", [2, 3, 2], [0.75, 0.25, 0.5]),
])
class TestGetSurvivalProbability:
def test_accepts_list_input(self, description,
input_data, expected_output):
"""List input data should be accepted even though
output is always a pandas series."""
output = metrics.get_survival_probability(input_data)
assert output.name == 'survival_probability'
assert output.to_list() == expected_output
def test_accepts_series_input(self, description,
input_data, expected_output):
# Setup
index = pd.date_range('2000-01-01', periods=len(input_data))
# Input series
input_name = 'name_placeholder'
input_data = pd.Series(input_data, index, name=input_name)
# Expected output
expected_name = 'survival_probability'
expected = pd.Series(expected_output, index, name=expected_name)
output = metrics.get_survival_probability(input_data)
assert_series_equal(output, expected)
class TestCalculateMoments:
@pytest.mark.parametrize("description, input_data, expected_output", [
("simple_values", [1, 2, 3], {'moment_1': [1, 2, 3],
'moment_2': [1, 4, 9],
'moment_3': [1, 8, 27]}),
("negative_values", [-1, 2, -3], {'moment_1': [1, 2, 3],
'moment_2': [1, 4, 9],
'moment_3': [1, 8, 27]}),
])
def test_gives_expected_output(self, description,
input_data, expected_output):
# Set up
index = | pd.date_range('2000-01-01', periods=3) | pandas.date_range |
#!/usr/bin/env python3.8
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 15 09:20:45 2021
@author: <NAME>
"""
import argparse
import pandas as pd
import os
import subprocess
import re
import sys
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
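# For example: str2bool("Yes") -> True, str2bool("0") -> False, str2bool("maybe") raises.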
parser = argparse.ArgumentParser()
parser.add_argument("-i","--infile", type=str, help="Input nucletide alignment .fna.")
parser.add_argument("-o", "--outdir", type=str, help="Name of the output directory.", default='')
# parser.add_argument("-d", "--delim", type=str, help="Column delimiter of the fasta header. Default '\t'",default='\t')
# parser.add_argument("-p", "--position", type=int, help="Position of geneID in fasta header. Default = 0", default=0)
parser.add_argument("-t", "--threads", type=str, help="Number of threads used by MAFFT. Default = -1 (all)", default=-1)
parser.add_argument("-k", "--keepduplicates", type=str2bool, help="Keep duplicate sequences. Default = False", default=False)
parser.add_argument("-c", "--consensusthreshold", type=float, help="Consensus threshold bitween 0 and 1 with 1 beeing a perfect consensus. Default = 0.95", default=0.95)
parser.add_argument("-g", "--gapthreshold", type=float, help="Percentage of gaps in sequences to be considered as partial sequence for removal. Default = 0.2", default=0.2)
parser.add_argument("-x", "--primer3", type=str, help="Primer3 input parameter file.")
parser.add_argument("--primers", type=str, help="Known primers for visualization in primer plot in .fasta format.", default="")
parser.add_argument("--negativesequences", type=str, help="File with sequences that get their consensus sequence added to the final alignment .fna.", default='')
args = parser.parse_args()
# splitting the fasta header is not really needed
# delim = args.delim
delim = '\t'
# idpos = args.position
idpos = 0
alignment = args.infile
threshold = args.consensusthreshold
gapthreshold = args.gapthreshold
keepduplicates = args.keepduplicates
primer3file = args.primer3
primers = args.primers
negativesequencefile = args.negativesequences
threads = str(args.threads)
if args.outdir:
outdir = args.outdir
if not os.path.isabs(outdir):
outdir = f'{os.getcwd()}/{args.outdir}'
if not os.path.exists(outdir):
os.makedirs(outdir)
else:
outdir = os.getcwd()
if not os.path.exists(outdir + '/results'):
os.makedirs(outdir + '/results')
outdir = outdir + '/results'
print(f'\nOut dir set to : {outdir}\n')
if not os.path.exists(outdir + '/primer3_runfiles'):
os.makedirs(outdir + '/primer3_runfiles')
primer3_runfiles_dir = outdir + '/primer3_runfiles'
if not os.path.exists(outdir + '/primer3_results'):
os.makedirs(outdir + '/primer3_results')
primer3_result_dir = outdir + '/primer3_results'
def read_fasta(multifasta, delim, idpos):
"""reading input fasta file
returns fasta dictionary with key=accessionNumber and value=Sequence
    returns fastaheader dictionary with key=accessionNumber and value=originalFastaHeader"""
fasta = {}
fastaheader = {}
with open(multifasta, 'r') as infile:
acNumber = ''
for line in infile:
if line.startswith('>'):
if delim:
acNumber = line.split(delim)[idpos].strip().strip('>')
fastaheader[acNumber] = line.strip()
else:
acNumber = line.split()[idpos].strip().strip('>')
fastaheader[acNumber] = line.strip()
else:
if acNumber in fasta:
fasta[acNumber] += line.strip().upper()
else:
fasta[acNumber] = line.strip().upper()
return fasta, fastaheader
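# Hedged usage sketch (the file name and header layout below are assumptions):
#   fasta, fastaheader = read_fasta("example.fna", delim="\t", idpos=0)
#   fasta["AB000001"]       -> "ATGCC..." (upper-case sequence string)
#   fastaheader["AB000001"] -> ">AB000001\tsome description"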
def avg(liste):
return sum(liste)/len(liste)
# function to add line breaks to sequence output for nicely formatted alignment files
def insert_newlines(string, linelen=64): # linelen=64 is recommended for an easily readable file.
return '\n'.join(string[i:i+linelen] for i in range(0, len(string), linelen))
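# For example: insert_newlines("A" * 70) == "A" * 64 + "\n" + "A" * 6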
# function to build mafft alignments
def align(infile,outfile):
with open(outfile, 'w') as outfile:
subprocess.call(['mafft','--auto' , '--adjustdirection', '--reorder', '--thread', threads, infile], stdout=outfile, stderr=subprocess.DEVNULL)
# reverse complement
def revcomp(seq):
seq = seq.upper().replace("-","")
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
return("".join(complement.get(base, base) for base in reversed(seq)))
# create html table from array of arrays [[row1,element2],[row2,element2]] with optional header row [colheader1,colheader2]
def to_html_table(table,header=[]):
html_table = '<table border=1>\n'
if header:
        html_table += '<tr><th>' + '</th><th>'.join([str(x) for x in header]) + '</th></tr>\n'
for row in table:
html_table += '<tr><td>' + '</td><td>'.join([str(x) for x in row]) + '</td></tr>\n'
html_table += '</table>\n'
return(html_table)
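# Hedged usage sketch:
#   to_html_table([[1, 2], [3, 4]], header=["a", "b"])
#   -> an HTML string with one header row and two data rows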
# create table for Number of Sequences
alignment_statistic_table = []
# read input fasta/alignment
fasta, fastaheader = read_fasta(alignment, delim, idpos)
number_of_sequences = len(fasta)
alignment_statistic_table.append(['Input sequences', number_of_sequences])
print(f'Number of sequences: {number_of_sequences}')
# make normal fasta from alignment if alignment was submitted
for gene in fasta:
fasta[gene] = fasta[gene].replace('-','')
# build base alignment
print(f'Building and writing base alignment to: {outdir}/base_alignment.fna')
outfile = f'{outdir}/base_fasta.fna'
with open(outfile, 'w') as outfile:
for header in fasta:
outfile.write(f'>{header}\n')
outfile.write(f'{insert_newlines(fasta[header])}\n')
align(infile=f'{outdir}/base_fasta.fna', outfile=f'{outdir}/base_alignment.fna')
# read base_alignment fasta
fasta, fastaheader = read_fasta(f'{outdir}/base_alignment.fna', delim, idpos)
# remove duplicates from fasta and build new alignment
unique = ''
if not keepduplicates:
unique = 'unique_'
outfile = f'{outdir}/unique_fasta.fna'
sequences = set()
number_of_uniques = 0
with open(outfile,'w') as outfile:
for header in fasta:
seqeuence_gapless = fasta[header].replace("-","")
if not seqeuence_gapless in sequences:
outfile.write(f'>{header}\n')
outfile.write(f'{insert_newlines(fasta[header])}\n')
sequences.add(seqeuence_gapless)
number_of_uniques += 1
print(f'Building and writing unique alignment to: {outdir}/unique_alignment.fna\n')
if number_of_uniques > 1:
align(infile=f'{outdir}/unique_fasta.fna', outfile=f'{outdir}/unique_alignment.fna')
else:
command = f'cp {outdir}/unique_fasta.fna {outdir}/unique_alignment.fna'
os.system(command)
# read unique_alignment fasta
fasta, fastaheader = read_fasta(f'{outdir}/unique_alignment.fna', delim, idpos)
number_of_sequences = len(fasta)
alignment_statistic_table.append(['Unique sequences', number_of_sequences])
print(f'Number of unique sequences: {number_of_sequences}')
# remove partial sequences
# partial sequences contain more than the defined % threshold of gaps
# might need to be adjusted if the alignment contains a sequence that is much longer than all others for example
tempfasta = dict(fasta)
for header in tempfasta:
seqlen = len(fasta[header])
gaps = fasta[header].count('-')
if gaps / seqlen > gapthreshold:
del fasta[header]
outfile = f'{outdir}/{unique}nopartial_fasta.fna'
with open(outfile,'w') as outfile:
for header in fasta:
outfile.write(f'>{header}\n')
outfile.write(f'{insert_newlines(fasta[header])}\n')
# build new alignment without partial sequences
print(f'Building and writing nonpartial alignment to: {outdir}/{unique}nopartial_alignment.fna\n')
align(infile=f'{outdir}/{unique}nopartial_fasta.fna', outfile=f'{outdir}/{unique}nopartial_alignment.fna')
# read unique_nonpartial_alignment fasta
fasta, fastaheader = read_fasta(f'{outdir}/{unique}nopartial_fasta.fna', delim, idpos)
number_of_sequences = len(fasta)
print(f'Number of {unique}nopartial sequences: {number_of_sequences}\n')
if keepduplicates:
alignment_statistic_table.append(['Non-partial sequences', number_of_sequences])
else:
alignment_statistic_table.append(['Unique-non-partial sequences', number_of_sequences])
# convert fasta to panda dataframe to calculate consensus scores
fasta_for_panda = {}
for gene in fasta:
fasta_for_panda[gene] = list(fasta[gene])
fasta_pd = | pd.DataFrame.from_dict(fasta_for_panda, orient='index') | pandas.DataFrame.from_dict |
import os,pickle
import pandas as pd
from tqdm import tqdm
from Segment.DataProcess.data import WORD_COL, TAG_COL,SEM_SPLIT_SIGNAL
from Segment.DataProcess.data_utils import *
'''
Data processing for the PKU corpus
tag : B-M-E-S
Raw data:
19980101-01-001-002/m 中共中央/nt 总书记/n 、/wu 国家/n 主席/n 江/nrf 泽民/nrg
Processed data (converted to a csv file):
中共中央/nt 总书记/n 、/wu 国家/n 主席/n 江/nrf 泽民/nrg
S B E S S B E B E S B E S B E B E S
'''
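# For reference (standard BMES convention, assumed here): a single-character word is
# tagged S, a two-character word B E, and longer words B M ... M E.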
SEM_SPLIT_SIGNAL_PKU = SEM_SPLIT_SIGNAL+\
['山西省','辽宁省','吉林省','黑龙江省','江苏省','山东省','福建省','江西省'
'安徽省','河北省','甘肃省','浙江省','河南省','湖南省','广西省',
'四川省','云南省','贵州省','陕西省','湖北省','重庆市','广东省']+\
['县长','所长','院长','处长','部长','总经理','宣传部','科学院','医院','委员会']
SEGMENT_MAX_SENTENCE_LEN_PKU = 100
MAX_SENTEN_LEN = 500
def trans_data(path,save_path,split_param = True):
words = []
tags = []
dict ={}
with open(path,'r',encoding='utf-8') as fr:
for item in tqdm(fr):
temp_list = item.strip().split()
item_list = []
if len(temp_list)>1:
for word in temp_list[1:]:
item_list.append(word.split('/')[0].split('{')[0].replace("[","").strip())
if split_param:
item_list = split_long_paras_into_sentence(item_list,SEM_SPLIT_SIGNAL_PKU,SEGMENT_MAX_SENTENCE_LEN_PKU)
# print(item_list)
for item_l in item_list:
temp = trans_sentence(item_l)
if len(item_l) > 1 and len(temp)<=MAX_SENTEN_LEN:
tags.append(trans_tags(item_l))
words.append(trans_sentence(item_l))
else:
temp = trans_sentence(item_list)
if len(item_list) > 1 and len(temp) <= MAX_SENTEN_LEN:
tags.append(trans_tags(item_list))
words.append(trans_sentence(item_list))
dict = {
WORD_COL:words,
TAG_COL:tags
}
#
data= | pd.DataFrame(dict) | pandas.DataFrame |
import pandas as pd
import datetime
from mimesis import Generic
import random
import numpy as np
products_head = ['ID', 'Name', 'Price', 'Unit Cost', 'Manufacturer']
customers_head = ['id', 'Name', 'Address', 'City', 'Country', 'Website', 'Email', 'Phone', 'Registration Date']
staff_head = ['id', 'Name', 'Title', 'Address', 'Contract Date', 'Telephone', 'Email', 'Termination Date', 'Office', 'Salary']
sales_head = ['Tx Id', 'Customer id', 'Product ID', 'Sales Date', 'Sales Manager', 'Point of Sales', 'Quantity', 'Total Price']
def main():
generate_products(340)
generate_staff(400)
generate_customers(4000)
generate_sales('./products.csv', './customers.csv', './employees.csv', 100000)
def generate_customers(n):
### Initialize timer, mimesis-class and dataframe
begin_timer = datetime.datetime.now()
gen = Generic('en')
df = pd.DataFrame(columns=customers_head)
### Generate information for n customers and add them into dataframe
for i in range(n):
id = 21000 + i
name = gen.business.company()
address = gen.address.address()
city = gen.address.city()
country = gen.address.country()
web = gen.internet.home_page()
email = gen.person.email()
phone = gen.person.telephone()
registered = gen.datetime.datetime()
df.loc[i] = [id, name, address, city, country, web, email, phone, registered]
print(f'Generated customer-table in {datetime.datetime.now() - begin_timer}\n')
df.to_csv('./customers.csv', index=False)
def generate_products(n):
### Initialize timer, mimesis-class and dataframe
begin_timer = datetime.datetime.now()
gen = Generic('en')
df = | pd.DataFrame(columns=products_head) | pandas.DataFrame |
import shutil
import numpy as np
import pytest
from aict_tools.configuration import AICTConfig
@pytest.fixture(scope="function")
def hdf5_file(tmpdir_factory, request):
fn = tmpdir_factory.mktemp("aict_test_data").join("test_file.hdf5")
shutil.copy("examples/gamma.hdf5", fn)
return fn, "events", AICTConfig.from_yaml("examples/config_energy.yaml")
@pytest.fixture(scope="function")
def cta_file(tmpdir_factory, request):
fn = tmpdir_factory.mktemp("aict_test_data").join("cta_file_test.h5")
shutil.copy("examples/cta_gammas_diffuse.dl1.h5", fn)
return fn
@pytest.fixture(scope="session")
def fact_config():
from aict_tools.configuration import AICTConfig
return AICTConfig.from_yaml("examples/config_energy.yaml")
@pytest.fixture(scope="session")
def cta_config():
from aict_tools.configuration import AICTConfig
return AICTConfig.from_yaml("examples/cta_full_config.yaml")
def test_read_default_columns(hdf5_file):
from aict_tools.io import read_data, get_column_names_in_file
from pandas.testing import assert_frame_equal
path, table_name, config = hdf5_file
df = read_data(path, table_name)
cols = get_column_names_in_file(path, table_name)
df_all_columns = read_data(path, table_name, columns=cols)
assert_frame_equal(df, df_all_columns)
def test_read_default_columns_chunked(hdf5_file):
from aict_tools.io import read_telescope_data, read_telescope_data_chunked
import pandas as pd
from pandas.testing import assert_frame_equal
path, table_name, config = hdf5_file
generator = read_telescope_data_chunked(path, config, 100)
df_chunked = pd.concat([df for df, _, _ in generator]).reset_index(drop=True)
df = read_telescope_data(path, config).reset_index(drop=True)
assert_frame_equal(df, df_chunked)
def test_read_chunks(hdf5_file):
from aict_tools.io import read_telescope_data_chunked, read_telescope_data
import pandas as pd
from pandas.testing import assert_frame_equal
path, table_name, config = hdf5_file
cols = [
"width",
"length",
]
chunk_size = 125
generator = read_telescope_data_chunked(path, config, chunk_size, cols)
dfs = []
for df, _, _ in generator:
dfs.append(df)
assert not df.empty
df_chunked = pd.concat(dfs).reset_index(drop=True)
df = read_telescope_data(path, config, columns=cols).reset_index(drop=True)
assert_frame_equal(df, df_chunked)
def test_read_chunks_cta_dl1(cta_file, cta_config):
from aict_tools.io import read_telescope_data, read_telescope_data_chunked
import pandas as pd
from pandas.testing import assert_frame_equal
chunk_size = 500
# choose some columns from different tables in the file
columns = [
"true_energy",
"azimuth",
"equivalent_focal_length",
"hillas_width",
"tel_id",
"event_id",
"obs_id",
]
cta_file = str(cta_file)
generator = read_telescope_data_chunked(
cta_file, cta_config, chunk_size, columns=columns
)
df1 = pd.concat([df for df, _, _ in generator])
df2 = read_telescope_data(cta_file, cta_config, columns=columns)
| assert_frame_equal(df1, df2) | pandas.testing.assert_frame_equal |
from numpy.random import default_rng
import numpy as np
import emcee
import pandas as pd
from tqdm.auto import tqdm
from sklearn.preprocessing import StandardScaler
import copy
from scipy.stats import norm, ortho_group
import random
import math
import scipy.stats as ss
"""
A collection of synthetic data generators, including multivariate normal data, data generated with Archimedean copulas,
data generated with arbitrary marginals and a Gaussian copula, and data from already existing drift generators.
"""
rng = default_rng()
# three available archimedean copulas
def clayton(theta, n):
v = random.gammavariate(1/theta, 1)
uf = [random.expovariate(1)/v for _ in range(n)]
return [(k+1)**(-1.0/theta) for k in uf]
def amh(theta, n):
# NOTE: Use SciPy RNG for convenience here
v = ss.geom(1-theta).rvs()
uf = [random.expovariate(1)/v for _ in range(n)]
return [(1-theta)/(math.exp(k)-theta) for k in uf]
def frank(theta, n):
v = ss.logser(1-math.exp(-theta)).rvs()
uf = [random.expovariate(1)/v for _ in range(n)]
return [-math.log(1-(1-math.exp(-theta))*(math.exp(-k))/theta) for k in uf]
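# Hedged usage sketch (theta values below are arbitrary illustrations, not recommendations):
def _copula_sample_example(n=3):
    """Draw one n-dimensional sample from each Archimedean copula defined above."""
    u_clayton = clayton(theta=2.0, n=n)
    u_amh = amh(theta=0.5, n=n)
    u_frank = frank(theta=5.0, n=n)
    return u_clayton, u_amh, u_frank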
def new_distribution_cholesky(pre_mean, ch_mean, perturbation=0.1):
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - perturbation
cond = 10000
var = None
while cond > 1000:
chol = ortho_group.rvs(len(pre_mean))
var = [email protected]
cond = np.linalg.cond(var)
return pre_mean, var
def new_similar_distribution_cholesky(pre_mean, pre_chol, ch_mean, perturbation=0.1):
"""Problematic, as the resulting cov matrix is almost diagonal!"""
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - perturbation # not to change the mean too much
cond = 10000
var = None
while cond > 1000:
chol = pre_chol + np.random.uniform(0, perturbation, (len(pre_mean), len(pre_mean)))
chol = nearest_orthogonal_matrix(chol)
var = [email protected]
cond = np.linalg.cond(var)
return pre_mean, var
def new_distribution_svd(pre_mean, ch_mean, perturbation=0.1, conditioning=1000):
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - perturbation
cond = conditioning*100*len(pre_mean)
var = None
while cond > conditioning*10*len(pre_mean) or cond < conditioning*len(pre_mean):
nums = np.random.uniform(0, 1, len(pre_mean)) # change eigenvalues distribution
corr = ss.random_correlation.rvs(nums/sum(nums)*len(pre_mean), random_state=rng)
S = np.diag(np.random.uniform(0, 1, len(pre_mean)))
var = S.T@corr@S
cond = np.linalg.cond(var)
return pre_mean, var
def new_similar_distribution_svd(pre_mean, pre_nums, pre_S, ch_mean, perturbation=0.02):
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - perturbation
cond = 10000*len(pre_mean)
var = None
while cond > 1000*len(pre_mean) or cond < 10*len(pre_mean):
nums = pre_nums + np.random.uniform(0, perturbation, len(pre_mean))
corr = ss.random_correlation.rvs(nums/sum(nums)*len(pre_mean), random_state=rng)
S = pre_S + np.diag(np.random.uniform(0, perturbation/2, len(pre_mean)))
var = S.T@corr@S
cond = np.linalg.cond(var)
return pre_mean, var
def new_distribution(pre_mean, pre_cov, ch_mean, ch_cov, change_X=True, change_y=True):
    # ch_mean and ch_cov are masks indicating where to change the mean and cov (localised drift)
# important! A complete mask for cov has to be passed, but only the upper triangular part will be considered
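    # e.g. (illustration) ch_mean = np.array([True, False, True]) perturbs only the
    # 1st and 3rd mean components; ch_cov is a boolean matrix of the same shape as pre_cov.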
if change_y and change_X:
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - 0.5 # not to change the mean too much
pre_cov[ch_cov] = np.random.normal(size=sum(sum(ch_cov)))
pre_cov = np.tril(pre_cov.T) + np.triu(pre_cov, 1)
if not np.all(np.linalg.eigvals(pre_cov) > 0):
pre_cov = nearestPD(pre_cov)
elif change_X:
pre_mean_old = pre_mean
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - 0.5 # not to change the mean too much
pre_mean[-1] = pre_mean_old[-1]
pre_cov_old = pre_cov
pre_cov[ch_cov] = np.random.normal(size=sum(sum(ch_cov)))
pre_cov = np.tril(pre_cov.T) + np.triu(pre_cov, 1)
pre_cov[-1][-1] = pre_cov_old[-1][-1]
while np.any(np.linalg.eigvals(pre_cov) <= 0):
pre_cov_ = nearestPD(pre_cov)
pre_cov_[-1][-1] = pre_cov_old[-1][-1]
pre_cov = pre_cov_
    else:  # no need to handle the case where only y changes for now
n_dim = len(pre_cov)
ch_cov = np.array([[False] * int(n_dim)] * int(n_dim), dtype=bool)
ch_cov[:, -1] = [True] * (n_dim-1) + [False]
pre_cov[ch_cov] = np.random.normal(size=sum(sum(ch_cov)))
pre_cov = np.tril(pre_cov.T) + np.triu(pre_cov, 1)
pre_cov_old = pre_cov
while np.any(np.linalg.eigvals(pre_cov) <= 0):
pre_cov_ = nearestPD(pre_cov)
# i add a small perturbation to P(X) too, if not I cannot change P(Y|X) without singularity in the cov matrix
pre_cov_[np.invert(ch_cov)] = pre_cov_old[np.invert(ch_cov)]+np.random.normal(size=sum(sum(np.invert(ch_cov))))/20
pre_cov_ = np.tril(pre_cov_.T) + np.triu(pre_cov_, 1)
pre_cov = pre_cov_
return pre_mean, pre_cov
def new_similar_distribution(pre_mean, pre_cov, ch_mean, ch_cov, change_X=True, change_y=True):
    # ch_mean and ch_cov are masks indicating where to change the mean and cov (localised drift)
# important! A complete mask for cov has to be passed, but only the upper triangular part will be considered
# new similar distribution, as of now, only permits data drift + covariate drift, unlike abrupt where
# the two can be separated and simulated independently
if change_y:
pre_mean[ch_mean] = pre_mean[ch_mean] + rng.uniform(-0.1, 0.1, size=sum(ch_mean))
pre_cov[ch_cov] = np.reshape(pre_cov[ch_cov], -1) + rng.uniform(-0.1, 0.1, sum(sum(ch_cov)))
pre_cov = np.tril(pre_cov.T) + np.triu(pre_cov, 1)
if not np.all(np.linalg.eigvals(pre_cov) > 0):
pre_cov = nearestPD(pre_cov)
else:
pre_mean_old = pre_mean
pre_mean[ch_mean] = pre_mean[ch_mean] + rng.uniform(-0.1, 0.1, size=sum(ch_mean))
pre_mean[-1] = pre_mean_old[-1]
pre_cov_old = pre_cov
pre_cov[ch_cov] = np.reshape(pre_cov[ch_cov], -1) + rng.uniform(-0.1, 0.1, sum(sum(ch_cov)))
pre_cov = np.tril(pre_cov.T) + np.triu(pre_cov, 1)
pre_cov[-1][-1] = pre_cov_old[-1][-1]
while np.any(np.linalg.eigvals(pre_cov) <= 0):
pre_cov_ = nearestPD(pre_cov)
pre_cov_[-1][-1] = pre_cov_old[-1][-1]
pre_cov = pre_cov_
return pre_mean, pre_cov
def new_distribution_deprecated(pre_mean, pre_cov, ch_mean, ch_cov):
# ch_mean and ch_cov are masks with where to change mean and cov (localised drift)
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - 0.5
pre_cov[ch_cov] = np.random.random((sum(ch_cov),len(pre_mean)))
pre_cov = nearestPD(pre_cov)
return pre_mean, pre_cov
def lnprob_trunc_norm(x, mean, n_dim, C):
if sum(x) > 0 *n_dim:
return -np.inf
else:
return -0.5 *( x -mean).dot(np.linalg.inv(C)).dot( x -mean)
def truncated_normal_sampling(pre_mean, pre_cov, size, n_dim):
if size <= 0:
return None
if size >= n_dim*2:
pos = emcee.utils.sample_ball(pre_mean, np.sqrt(np.diag(pre_cov)), size=size)
else:
pos = rng.multivariate_normal(pre_mean, pre_cov, size=size)
S = emcee.EnsembleSampler(size, n_dim, lnprob_trunc_norm, args=(pre_mean, n_dim, pre_cov))
pos, prob, state = S.run_mcmc(pos, 100)
# print(np.max(pos))
return pos
def nearestPD(A):
"""Find the nearest positive-definite matrix to input
A Python/Numpy port of <NAME>'s `nearestSPD` MATLAB code [1], which
credits [2].
[1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
[2] <NAME>, "Computing a nearest symmetric positive semidefinite
matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
"""
B = (A + A.T) / 2
_, s, V = np.linalg.svd(B)
H = np.dot(V.T, np.dot(np.diag(s), V))
A2 = (B + H) / 2
A3 = (A2 + A2.T) / 2
if isPD(A3):
return A3
spacing = np.spacing(np.linalg.norm(A))
I = np.eye(A.shape[0])
k = 1
while not isPD(A3):
mineig = np.min(np.real(np.linalg.eigvals(A3)))
A3 += I * (-mineig * k**2 + spacing)
k += 1
return A3
def isPD(B):
"""Returns true when input is positive-definite, via Cholesky"""
try:
_ = np.linalg.cholesky(B)
return True
except np.linalg.LinAlgError:
return False
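# Hedged usage sketch (the 2x2 matrix below is a made-up indefinite example):
def _nearest_pd_example():
    A = np.array([[1.0, 2.0], [2.0, 1.0]])  # symmetric, eigenvalues 3 and -1, so not PD
    A_pd = nearestPD(A)
    assert isPD(A_pd)
    return A_pd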
def nearest_orthogonal_matrix(A):
'''
Find closest orthogonal matrix to *A* using iterative method.
    Based on the code from the REMOVE_SOURCE_LEAKAGE function in the OSL Matlab package.
Args:
A (numpy.array): array shaped k, n, where k is number of channels, n - data points
Returns:
L (numpy.array): orthogonalized matrix with amplitudes preserved
Reading:
<NAME>., A symmetric multivariate leakage correction for MEG connectomes.,
Neuroimage. 2015 Aug 15;117:439-48. doi: 10.1016/j.neuroimage.2015.03.071
'''
#
MAX_ITER = 2000
TOLERANCE = np.max((1, np.max(A.shape) * np.linalg.svd(A.T, False, False)[0])) * np.finfo(A.dtype).eps # TODO
reldiff = lambda a, b: 2 * abs(a - b) / (abs(a) + abs(b))
convergence = lambda rho, prev_rho: reldiff(rho, prev_rho) <= TOLERANCE
A_b = A.conj()
d = np.sqrt(np.sum(A * A_b, axis=1))
rhos = np.zeros(MAX_ITER)
for i in range(MAX_ITER):
scA = A.T * d
u, s, vh = np.linalg.svd(scA, False)
V = np.dot(u, vh)
# TODO check is rank is full
d = np.sum(A_b * V.T, axis=1)
L = (V * d).T
E = A - L
rhos[i] = np.sqrt(np.sum(E * E.conj()))
if i > 0 and convergence(rhos[i], rhos[i - 1]):
break
return L
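# Hedged usage sketch (a random square mixing matrix; this only illustrates the call):
def _nearest_orthogonal_example(k=4):
    M = np.random.randn(k, k)
    L = nearest_orthogonal_matrix(M)
    return L.shape  # same shape as the input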
def generate_normal_drift_data(batch_size, train_size, length, pre_mean_, pre_cov_, ch_mean, ch_cov,
change, n_dim, scale=False, gradual_drift=False, oracle=False, change_X=True,
change_y=True, verbose=False):
"""Generates multivariate normal drifting data"""
if scale:
scaler = StandardScaler()
pre_mean = pre_mean_.copy()
pre_cov = pre_cov_.copy()
df = pd.DataFrame()
means = []
covs = []
if verbose:
disable = False
else:
disable = True
for i in tqdm(range(length), disable=disable):
if i % change == 0:
pre_mean, pre_cov = new_distribution(pre_mean, pre_cov, ch_mean, ch_cov,
change_X=change_X, change_y=change_y)
if gradual_drift:
pre_mean, pre_cov = new_similar_distribution(np.zeros(n_dim), pre_cov, [False] * n_dim, ch_cov,
change_X=change_X, change_y=change_y)
if i == 0:
data = rng.multivariate_normal(pre_mean, pre_cov, size=train_size)
else:
data = rng.multivariate_normal(pre_mean, pre_cov, size=batch_size)
prov = pd.DataFrame(data)
if i == 0 and scale:
scaled_features = scaler.fit_transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
elif i != 0 and scale:
scaled_features = scaler.transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
prov["batch"] = i
df = df.append(prov, ignore_index=True)
means.append(list(pre_mean))
covs.append(copy.deepcopy(pre_cov))
df.rename(columns={n_dim - 1: 'label'}, inplace=True)
if oracle:
return df, means, covs
else:
return df
def generate_normal_drift_data_cholesky(batch_size, train_size, length, pre_mean_, pre_chol_, ch_mean,
change, n_dim, scale=False, gradual_drift=False, oracle=False, verbose=False):
"""Generates multivariate normal drifting data -> no correlation! Do not use!!!"""
if scale:
scaler = StandardScaler()
pre_mean = pre_mean_.copy()
pre_chol = pre_chol_.copy()
df = pd.DataFrame()
means = []
covs = []
if verbose:
disable = False
else:
disable = True
for i in tqdm(range(length), disable=disable):
if i % change == 0:
pre_mean, cov = new_distribution_cholesky(pre_mean, ch_mean)
if gradual_drift:
pre_mean, cov = new_similar_distribution_cholesky(pre_mean, pre_chol, ch_mean)
if i == 0:
data = rng.multivariate_normal(pre_mean, cov, size=train_size)
else:
data = rng.multivariate_normal(pre_mean, cov, size=batch_size)
prov = pd.DataFrame(data)
if i == 0 and scale:
scaled_features = scaler.fit_transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
elif i != 0 and scale:
scaled_features = scaler.transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
prov["batch"] = i
df = df.append(prov, ignore_index=True)
means.append(list(pre_mean))
covs.append(copy.deepcopy(cov))
df.rename(columns={n_dim - 1: 'label'}, inplace=True)
if oracle:
return df, means, covs
else:
return df
def generate_normal_drift_data_svd(batch_size, train_size, length, pre_mean_, pre_eigs_, pre_S_, ch_mean,
change, n_dim, scale=False, gradual_drift=False, oracle=False, verbose=False):
"""Generates multivariate normal drifting data"""
if scale:
scaler = StandardScaler()
pre_mean = pre_mean_.copy()
pre_eigs = pre_eigs_.copy()
pre_S = pre_S_.copy()
df = pd.DataFrame()
means = []
covs = []
if verbose:
disable = False
else:
disable = True
for i in tqdm(range(length), disable=disable):
if i % change == 0:
pre_mean, cov = new_distribution_svd(pre_mean, ch_mean)
if gradual_drift:
pre_mean, cov = new_similar_distribution_svd(pre_mean, pre_eigs, pre_S, ch_mean)
if i == 0:
data = rng.multivariate_normal(pre_mean, cov, size=train_size)
else:
data = rng.multivariate_normal(pre_mean, cov, size=batch_size)
prov = pd.DataFrame(data)
if i == 0 and scale:
scaled_features = scaler.fit_transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
elif i != 0 and scale:
scaled_features = scaler.transform(prov.values)
prov = | pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns) | pandas.DataFrame |
# Copyright (c) 2020, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core import DataFrame, Series
from cudf.tests.utils import (
INTEGER_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
)
def test_series_replace():
a1 = np.array([0, 1, 2, 3, 4])
# Numerical
a2 = np.array([5, 1, 2, 3, 4])
sr1 = Series(a1)
sr2 = sr1.replace(0, 5)
assert_eq(a2, sr2.to_array())
# Categorical
psr3 = pd.Series(["one", "two", "three"], dtype="category")
psr4 = psr3.replace("one", "two")
sr3 = Series.from_pandas(psr3)
sr4 = sr3.replace("one", "two")
assert_eq(psr4, sr4)
psr5 = psr3.replace("one", "five")
sr5 = sr3.replace("one", "five")
assert_eq(psr5, sr5)
# List input
a6 = np.array([5, 6, 2, 3, 4])
sr6 = sr1.replace([0, 1], [5, 6])
assert_eq(a6, sr6.to_array())
with pytest.raises(TypeError):
sr1.replace([0, 1], [5.5, 6.5])
# Series input
a8 = np.array([5, 5, 5, 3, 4])
sr8 = sr1.replace(sr1[:3], 5)
assert_eq(a8, sr8.to_array())
# large input containing null
sr9 = Series(list(range(400)) + [None])
sr10 = sr9.replace([22, 323, 27, 0], None)
assert sr10.null_count == 5
assert len(sr10.to_array()) == (401 - 5)
sr11 = sr9.replace([22, 323, 27, 0], -1)
assert sr11.null_count == 1
assert len(sr11.to_array()) == (401 - 1)
# large input not containing nulls
sr9 = sr9.fillna(-11)
sr12 = sr9.replace([22, 323, 27, 0], None)
assert sr12.null_count == 4
assert len(sr12.to_array()) == (401 - 4)
sr13 = sr9.replace([22, 323, 27, 0], -1)
assert sr13.null_count == 0
assert len(sr13.to_array()) == 401
def test_series_replace_with_nulls():
a1 = np.array([0, 1, 2, 3, 4])
# Numerical
a2 = np.array([-10, 1, 2, 3, 4])
sr1 = Series(a1)
sr2 = sr1.replace(0, None).fillna(-10)
assert_eq(a2, sr2.to_array())
# List input
a6 = np.array([-10, 6, 2, 3, 4])
sr6 = sr1.replace([0, 1], [None, 6]).fillna(-10)
assert_eq(a6, sr6.to_array())
sr1 = Series([0, 1, 2, 3, 4, None])
with pytest.raises(TypeError):
sr1.replace([0, 1], [5.5, 6.5]).fillna(-10)
# Series input
a8 = np.array([-10, -10, -10, 3, 4, -10])
sr8 = sr1.replace(sr1[:3], None).fillna(-10)
assert_eq(a8, sr8.to_array())
a9 = np.array([-10, 6, 2, 3, 4, -10])
sr9 = sr1.replace([0, 1], [None, 6]).fillna(-10)
assert_eq(a9, sr9.to_array())
def test_dataframe_replace():
# numerical
pdf1 = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0, 1, 2, 3]})
gdf1 = DataFrame.from_pandas(pdf1)
pdf2 = pdf1.replace(0, 4)
gdf2 = gdf1.replace(0, 4)
assert_eq(gdf2, pdf2)
# categorical
pdf4 = pd.DataFrame(
{"a": ["one", "two", "three"], "b": ["one", "two", "three"]},
dtype="category",
)
gdf4 = DataFrame.from_pandas(pdf4)
pdf5 = pdf4.replace("two", "three")
gdf5 = gdf4.replace("two", "three")
assert_eq(gdf5, pdf5)
# list input
pdf6 = pdf1.replace([0, 1], [4, 5])
gdf6 = gdf1.replace([0, 1], [4, 5])
assert_eq(gdf6, pdf6)
pdf7 = pdf1.replace([0, 1], 4)
gdf7 = gdf1.replace([0, 1], 4)
assert_eq(gdf7, pdf7)
# dict input:
pdf8 = pdf1.replace({"a": 0, "b": 0}, {"a": 4, "b": 5})
gdf8 = gdf1.replace({"a": 0, "b": 0}, {"a": 4, "b": 5})
assert_eq(gdf8, pdf8)
pdf9 = pdf1.replace({"a": 0}, {"a": 4})
gdf9 = gdf1.replace({"a": 0}, {"a": 4})
assert_eq(gdf9, pdf9)
def test_dataframe_replace_with_nulls():
# numerical
pdf1 = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0, 1, 2, 3]})
gdf1 = DataFrame.from_pandas(pdf1)
pdf2 = pdf1.replace(0, 4)
gdf2 = gdf1.replace(0, None).fillna(4)
assert_eq(gdf2, pdf2)
# list input
pdf6 = pdf1.replace([0, 1], [4, 5])
gdf6 = gdf1.replace([0, 1], [4, None]).fillna(5)
assert_eq(gdf6, pdf6)
pdf7 = pdf1.replace([0, 1], 4)
gdf7 = gdf1.replace([0, 1], None).fillna(4)
assert_eq(gdf7, pdf7)
# dict input:
pdf8 = pdf1.replace({"a": 0, "b": 0}, {"a": 4, "b": 5})
gdf8 = gdf1.replace({"a": 0, "b": 0}, {"a": None, "b": 5}).fillna(4)
assert_eq(gdf8, pdf8)
gdf1 = DataFrame({"a": [0, 1, 2, 3], "b": [0, 1, 2, None]})
gdf9 = gdf1.replace([0, 1], [4, 5]).fillna(3)
assert_eq(gdf9, pdf6)
def test_replace_strings():
pdf = pd.Series(["a", "b", "c", "d"])
gdf = Series(["a", "b", "c", "d"])
assert_eq(pdf.replace("a", "e"), gdf.replace("a", "e"))
@pytest.mark.parametrize(
"psr",
[
pd.Series([0, 1, None, 2, None], dtype=pd.Int8Dtype()),
pd.Series([0, 1, np.nan, 2, np.nan]),
],
)
@pytest.mark.parametrize("data_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("fill_value", [10, pd.Series([10, 20, 30, 40, 50])])
@pytest.mark.parametrize("inplace", [True, False])
def test_series_fillna_numerical(psr, data_dtype, fill_value, inplace):
test_psr = psr.copy(deep=True)
# TODO: These tests should use Pandas' nullable int type
# when we support a recent enough version of Pandas
# https://pandas.pydata.org/pandas-docs/stable/user_guide/integer_na.html
if np.dtype(data_dtype).kind not in ("f") and test_psr.dtype.kind == "i":
test_psr = test_psr.astype(
cudf.utils.dtypes.cudf_dtypes_to_pandas_dtypes[
np.dtype(data_dtype)
]
)
gsr = cudf.from_pandas(test_psr)
if isinstance(fill_value, pd.Series):
fill_value_cudf = cudf.from_pandas(fill_value)
else:
fill_value_cudf = fill_value
expected = test_psr.fillna(fill_value, inplace=inplace)
actual = gsr.fillna(fill_value_cudf, inplace=inplace)
if inplace:
expected = test_psr
actual = gsr
# TODO: Remove check_dtype when we have support
# to compare with pandas nullable dtypes
assert_eq(expected, actual, check_dtype=False)
@pytest.mark.parametrize(
"data",
[
[1, None, None, 2, 3, 4],
[None, None, 1, 2, None, 3, 4],
[1, 2, None, 3, 4, None, None],
],
)
@pytest.mark.parametrize("container", [pd.Series, pd.DataFrame])
@pytest.mark.parametrize("data_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("method", ["ffill", "bfill"])
@pytest.mark.parametrize("inplace", [True, False])
def test_fillna_method_numerical(data, container, data_dtype, method, inplace):
if container == pd.DataFrame:
data = {"a": data, "b": data, "c": data}
pdata = container(data)
if np.dtype(data_dtype).kind not in ("f"):
data_dtype = cudf.utils.dtypes.cudf_dtypes_to_pandas_dtypes[
np.dtype(data_dtype)
]
pdata = pdata.astype(data_dtype)
    # Explicitly using nan_as_null=True
gdata = cudf.from_pandas(pdata, nan_as_null=True)
expected = pdata.fillna(method=method, inplace=inplace)
actual = gdata.fillna(method=method, inplace=inplace)
if inplace:
expected = pdata
actual = gdata
assert_eq(expected, actual, check_dtype=False)
@pytest.mark.parametrize(
"psr",
[
pd.Series(["a", "b", "a", None, "c", None], dtype="category"),
pd.Series(
["a", "b", "a", None, "c", None],
dtype="category",
index=["q", "r", "z", "a", "b", "c"],
),
pd.Series(
["a", "b", "a", None, "c", None],
dtype="category",
index=["x", "t", "p", "q", "r", "z"],
),
pd.Series(["a", "b", "a", np.nan, "c", np.nan], dtype="category"),
pd.Series(
[None, None, None, None, None, None, "a", "b", "c"],
dtype="category",
),
],
)
@pytest.mark.parametrize(
"fill_value",
[
"c",
pd.Series(["c", "c", "c", "c", "c", "a"], dtype="category"),
pd.Series(
["a", "b", "a", None, "c", None],
dtype="category",
index=["x", "t", "p", "q", "r", "z"],
),
pd.Series(
["a", "b", "a", None, "c", None],
dtype="category",
index=["q", "r", "z", "a", "b", "c"],
),
pd.Series(["a", "b", "a", None, "c", None], dtype="category"),
pd.Series(["a", "b", "a", np.nan, "c", np.nan], dtype="category"),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_fillna_categorical(psr, fill_value, inplace):
gsr = Series.from_pandas(psr)
if isinstance(fill_value, pd.Series):
fill_value_cudf = cudf.from_pandas(fill_value)
else:
fill_value_cudf = fill_value
expected = psr.fillna(fill_value, inplace=inplace)
got = gsr.fillna(fill_value_cudf, inplace=inplace)
if inplace:
expected = psr
got = gsr
assert_eq(expected, got)
@pytest.mark.parametrize(
"psr",
[
pd.Series(pd.date_range("2010-01-01", "2020-01-10", freq="1y")),
pd.Series(["2010-01-01", None, "2011-10-10"], dtype="datetime64[ns]"),
pd.Series(
[
None,
None,
None,
None,
None,
None,
"2011-10-10",
"2010-01-01",
"2010-01-02",
"2010-01-04",
"2010-11-01",
],
dtype="datetime64[ns]",
),
pd.Series(
[
None,
None,
None,
None,
None,
None,
"2011-10-10",
"2010-01-01",
"2010-01-02",
"2010-01-04",
"2010-11-01",
],
dtype="datetime64[ns]",
index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"],
),
],
)
@pytest.mark.parametrize(
"fill_value",
[
pd.Timestamp("2010-01-02"),
pd.Series(pd.date_range("2010-01-01", "2020-01-10", freq="1y"))
+ pd.Timedelta("1d"),
pd.Series(["2010-01-01", None, "2011-10-10"], dtype="datetime64[ns]"),
pd.Series(
[
None,
None,
None,
None,
None,
None,
"2011-10-10",
"2010-01-01",
"2010-01-02",
"2010-01-04",
"2010-11-01",
],
dtype="datetime64[ns]",
),
pd.Series(
[
None,
None,
None,
None,
None,
None,
"2011-10-10",
"2010-01-01",
"2010-01-02",
"2010-01-04",
"2010-11-01",
],
dtype="datetime64[ns]",
index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"],
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_fillna_datetime(psr, fill_value, inplace):
gsr = cudf.from_pandas(psr)
if isinstance(fill_value, pd.Series):
fill_value_cudf = cudf.from_pandas(fill_value)
else:
fill_value_cudf = fill_value
expected = psr.fillna(fill_value, inplace=inplace)
got = gsr.fillna(fill_value_cudf, inplace=inplace)
if inplace:
got = gsr
expected = psr
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
# Categorical
pd.Categorical([1, 2, None, None, 3, 4]),
pd.Categorical([None, None, 1, None, 3, 4]),
pd.Categorical([1, 2, None, 3, 4, None, None]),
pd.Categorical(["1", "20", None, None, "3", "40"]),
pd.Categorical([None, None, "10", None, "30", "4"]),
pd.Categorical(["1", "20", None, "30", "4", None, None]),
# Datetime
np.array(
[
"2020-01-01 08:00:00",
"2020-01-01 09:00:00",
None,
"2020-01-01 10:00:00",
None,
"2020-01-01 10:00:00",
],
dtype="datetime64[ns]",
),
np.array(
[
None,
None,
"2020-01-01 09:00:00",
"2020-01-01 10:00:00",
None,
"2020-01-01 10:00:00",
],
dtype="datetime64[ns]",
),
np.array(
[
"2020-01-01 09:00:00",
None,
None,
"2020-01-01 10:00:00",
None,
None,
],
dtype="datetime64[ns]",
),
# Timedelta
np.array(
[10, 100, 1000, None, None, 10, 100, 1000], dtype="datetime64[ns]"
),
np.array(
[None, None, 10, None, 1000, 100, 10], dtype="datetime64[ns]"
),
np.array(
[10, 100, None, None, 1000, None, None], dtype="datetime64[ns]"
),
# String
np.array(
["10", "100", "1000", None, None, "10", "100", "1000"],
dtype="object",
),
np.array(
[None, None, "1000", None, "10", "100", "10"], dtype="object"
),
np.array(
["10", "100", None, None, "1000", None, None], dtype="object"
),
],
)
@pytest.mark.parametrize("container", [pd.Series, pd.DataFrame])
@pytest.mark.parametrize("method", ["ffill", "bfill"])
@pytest.mark.parametrize("inplace", [True, False])
def test_fillna_method_fixed_width_non_num(data, container, method, inplace):
if container == pd.DataFrame:
data = {"a": data, "b": data, "c": data}
pdata = container(data)
    # Explicitly using nan_as_null=True
gdata = cudf.from_pandas(pdata, nan_as_null=True)
expected = pdata.fillna(method=method, inplace=inplace)
actual = gdata.fillna(method=method, inplace=inplace)
if inplace:
expected = pdata
actual = gdata
assert_eq(expected, actual)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, None], "b": [None, None, 5]}),
pd.DataFrame(
{"a": [1, 2, None], "b": [None, None, 5]}, index=["a", "p", "z"]
),
],
)
@pytest.mark.parametrize(
"value",
[
10,
pd.Series([10, 20, 30]),
pd.Series([3, 4, 5]),
pd.Series([10, 20, 30], index=["z", "a", "p"]),
{"a": 5, "b": pd.Series([3, 4, 5])},
{"a": 5001},
{"b": pd.Series([11, 22, 33], index=["a", "p", "z"])},
{"a": 5, "b": pd.Series([3, 4, 5], index=["a", "p", "z"])},
{"c": 100},
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_fillna_dataframe(df, value, inplace):
pdf = df.copy(deep=True)
gdf = DataFrame.from_pandas(pdf)
fill_value_pd = value
if isinstance(fill_value_pd, (pd.Series, pd.DataFrame)):
fill_value_cudf = cudf.from_pandas(fill_value_pd)
elif isinstance(fill_value_pd, dict):
fill_value_cudf = {}
for key in fill_value_pd:
temp_val = fill_value_pd[key]
if isinstance(temp_val, pd.Series):
temp_val = cudf.from_pandas(temp_val)
fill_value_cudf[key] = temp_val
else:
fill_value_cudf = value
expect = pdf.fillna(fill_value_pd, inplace=inplace)
got = gdf.fillna(fill_value_cudf, inplace=inplace)
if inplace:
got = gdf
expect = pdf
assert_eq(expect, got)
@pytest.mark.parametrize(
"psr",
[
pd.Series(["a", "b", "c", "d"]),
pd.Series([None] * 4, dtype="object"),
pd.Series(["z", None, "z", None]),
pd.Series(["x", "y", None, None, None]),
pd.Series([None, None, None, "i", "P"]),
],
)
@pytest.mark.parametrize(
"fill_value",
[
"a",
pd.Series(["a", "b", "c", "d"]),
pd.Series(["z", None, "z", None]),
pd.Series([None] * 4, dtype="object"),
pd.Series(["x", "y", None, None, None]),
pd.Series([None, None, None, "i", "P"]),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_fillna_string(psr, fill_value, inplace):
gsr = cudf.from_pandas(psr)
if isinstance(fill_value, pd.Series):
fill_value_cudf = cudf.from_pandas(fill_value)
else:
fill_value_cudf = fill_value
expected = psr.fillna(fill_value, inplace=inplace)
got = gsr.fillna(fill_value_cudf, inplace=inplace)
if inplace:
expected = psr
got = gsr
assert_eq(expected, got)
@pytest.mark.parametrize("data_dtype", INTEGER_TYPES)
def test_series_fillna_invalid_dtype(data_dtype):
gdf = Series([1, 2, None, 3], dtype=data_dtype)
fill_value = 2.5
with pytest.raises(TypeError) as raises:
gdf.fillna(fill_value)
raises.match(
f"Cannot safely cast non-equivalent"
f" {type(fill_value).__name__} to {gdf.dtype.type.__name__}"
)
@pytest.mark.parametrize("data_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("fill_value", [100, 100.0, 128.5])
def test_series_where(data_dtype, fill_value):
psr = pd.Series(list(range(10)), dtype=data_dtype)
sr = Series.from_pandas(psr)
if sr.dtype.type(fill_value) != fill_value:
with pytest.raises(TypeError):
sr.where(sr > 0, fill_value)
else:
# Cast back to original dtype as pandas automatically upcasts
expect = psr.where(psr > 0, fill_value).astype(psr.dtype)
got = sr.where(sr > 0, fill_value)
assert_eq(expect, got)
if sr.dtype.type(fill_value) != fill_value:
with pytest.raises(TypeError):
sr.where(sr < 0, fill_value)
else:
expect = psr.where(psr < 0, fill_value).astype(psr.dtype)
got = sr.where(sr < 0, fill_value)
assert_eq(expect, got)
if sr.dtype.type(fill_value) != fill_value:
with pytest.raises(TypeError):
sr.where(sr == 0, fill_value)
else:
expect = psr.where(psr == 0, fill_value).astype(psr.dtype)
got = sr.where(sr == 0, fill_value)
assert_eq(expect, got)
@pytest.mark.parametrize("fill_value", [100, 100.0, 100.5])
def test_series_with_nulls_where(fill_value):
psr = pd.Series([None] * 3 + list(range(5)))
sr = Series.from_pandas(psr)
expect = psr.where(psr > 0, fill_value)
got = sr.where(sr > 0, fill_value)
assert_eq(expect, got)
expect = psr.where(psr < 0, fill_value)
got = sr.where(sr < 0, fill_value)
assert_eq(expect, got)
expect = psr.where(psr == 0, fill_value)
got = sr.where(sr == 0, fill_value)
assert_eq(expect, got)
@pytest.mark.parametrize("fill_value", [[888, 999]])
def test_dataframe_with_nulls_where_with_scalars(fill_value):
pdf = pd.DataFrame(
{
"A": [-1, 2, -3, None, 5, 6, -7, 0],
"B": [4, -2, 3, None, 7, 6, 8, 0],
}
)
gdf = DataFrame.from_pandas(pdf)
expect = pdf.where(pdf % 3 == 0, fill_value)
got = gdf.where(gdf % 3 == 0, fill_value)
assert_eq(expect, got)
def test_dataframe_with_different_types():
# Testing for int and float
pdf = pd.DataFrame(
{"A": [111, 22, 31, 410, 56], "B": [-10.12, 121.2, 45.7, 98.4, 87.6]}
)
gdf = DataFrame.from_pandas(pdf)
expect = pdf.where(pdf > 50, -pdf)
got = gdf.where(gdf > 50, -gdf)
assert_eq(expect, got)
# Testing for string
pdf = pd.DataFrame({"A": ["a", "bc", "cde", "fghi"]})
gdf = DataFrame.from_pandas(pdf)
pdf_mask = pd.DataFrame({"A": [True, False, True, False]})
gdf_mask = DataFrame.from_pandas(pdf_mask)
expect = pdf.where(pdf_mask, ["cudf"])
got = gdf.where(gdf_mask, ["cudf"])
assert_eq(expect, got)
    # Testing for categorical
pdf = | pd.DataFrame({"A": ["a", "b", "b", "c"]}) | pandas.DataFrame |
import numpy as np
import os
import glob
import pandas as pd
import re
# Get the current directory
current_dir=os.path.dirname(os.path.abspath(__file__))
# Returns a DataFrame recording the nearest-neighbour Bounding Box pairings
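# Hedged illustration (the 2-D point layout below is an assumption for the example):
#   a = np.array([[0, 0], [10, 10]]); b = np.array([[1, 1], [9, 9]])
#   nearest(a, b) greedily pairs each point in `a` with its closest remaining point
#   in `b` and returns [distance, point_a, point_b] rows as a DataFrame.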
def nearest(a, b):
d=[]
while (len(a) > 0) and (len(b) > 0):
out_min = 9999999999.9
out_res = []
for i in a:
if i is not np.nan:
min = np.linalg.norm(i - b[0])
result = [min, i, b[0]]
for j in b:
if j is not np.nan:
c = np.linalg.norm(i - j)
if min > c:
min = c
result = [min, i.tolist(), j.tolist()]
else:
pass
if out_min > min:
out_min = min
out_res = result
a = np.delete(a, np.where(a[:] == out_res[1])[0], 0)
b = np.delete(b, np.where(b[:] == out_res[2])[0], 0)
d.append(out_res)
df = | pd.DataFrame(d) | pandas.DataFrame |
import pandas as pd
dataset_https = pd.read_csv('./3 - separate_pending_data/batch_mal_7/malicious_https.csv')
dataset_tld_urllen = pd.read_csv('./3 - separate_pending_data/batch_mal_7/malicious_tld_urllen.csv')
dataset_content = pd.read_csv('./3 - separate_pending_data/batch_mal_7/malicious_content.csv')
dataset_whois = | pd.read_csv('./3 - separate_pending_data/batch_mal_7/malicious_whois.csv') | pandas.read_csv |
import os, sys, re
import numpy as np
import argparse
atac_columns = [[3,4,5], [6,7,8], [9,10,11,12,13], [14,15,16], [17,18,19]]
data = []
cluster = []
markers = []
marker_genes = 'ATML1', 'SPCH', 'MUTE', 'FAMA', 'GASA9'
with open(sys.argv[1]) as fi:
items = fi.readline().strip().split('\t')
columns = items[3:]
if len(columns) > 10:
columns = list(marker_genes) + columns[-5:]
locations = []
for line in fi:
avals = []
evals = []
items = line.strip().split('\t')
for cols in atac_columns:
avals.append(np.mean([float(items[col]) for col in cols]))
for item in items[-5:]:
if item == '.':
evals = None
break
evals.append(np.log(float(item) + .1))
if avals is not None and evals is not None:
for m in marker_genes:
if items[1].find('({})'.format(m)) >= 0:
markers.append((len(locations), m))
# print(m)
break
locations.append(items[1])
data.append(avals + evals)
# print(avals, evals)
cluster.append(4 - int(items[0]))
import umap
import pandas as pd
import sklearn.decomposition
matrix = | pd.DataFrame(data, columns=columns, index=locations) | pandas.DataFrame |
import pandas as pd
import numpy as np
class PearsonCorrelation:
'''
    A feature selection method using Pearson correlation.
    A Pearson correlation coefficient is calculated between each feature of the
    dataset and the target, one coefficient per feature. The coefficient can take
    values between -1 and 1. If the value is near 1, the feature is positively
    correlated (directly proportional) with the target; if the value is near -1,
    the feature is negatively correlated (inversely proportional) with the target;
    and if the value is near 0, the feature is not related to the target.
Parameters:
----------
features: DataFrame
        Features are the individual independent variables that act as the input to
        your system. The features DataFrame should only contain numerical values
        (if categorical values are present, encode them as numerical labels).
target: Series
        The target is the output of the input variables. It could be the
        individual classes that the input variables may be mapped to in a
        classification problem, or the output value range in a regression
        problem. The target Series should also contain numerical values, like the
        features DataFrame.
'''
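    # Hedged usage sketch (the toy frames and column names below are made up for
    # illustration, not taken from any real project):
    #   X = pd.DataFrame({"f1": [1, 2, 3, 4], "f2": [4, 3, 2, 1]})
    #   y = pd.Series([1.0, 2.1, 2.9, 4.2])
    #   scores = PearsonCorrelation(X, y).corr_score(sort=True)
    #   # "f1" scores near +1, "f2" near -1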
def __init__(self, features, target):
self.X = features
self.y = target
def corr_score(self, sort=False, reset_index=False):
'''
Evaluate the Pearson Correlation Coefficient of each feature column
with the target column.
Parameters
----------
sort: bool, default=False
            Whether to sort the features by coefficient value. If True, the
            returned DataFrame is sorted in descending order of the correlation
            coefficient values.
reset_index: bool, default=False
            Whether to reset the index of the returned DataFrame. If True, the
            index of the output DataFrame is reset.
Returns
-------
        cor_score: DataFrame with one column containing the feature names and the
            other containing the coefficient values.
'''
cor_list = []
feature_name = self.X.columns.tolist()
cor_score = | pd.DataFrame(columns=['feature', 'cor_score']) | pandas.DataFrame |
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import pytest
import vectorbt as vbt
from vectorbt.utils.config import merge_dicts
seed = 42
# ############# base.py ############# #
class MyData(vbt.Data):
@classmethod
def download_symbol(cls, symbol, shape=(5, 3), start_date=datetime(2020, 1, 1), columns=None, index_mask=None,
column_mask=None, return_arr=False, tz_localize=None, seed=seed):
np.random.seed(seed)
a = np.random.uniform(size=shape) + symbol
if return_arr:
return a
index = [start_date + timedelta(days=i) for i in range(a.shape[0])]
if a.ndim == 1:
sr = pd.Series(a, index=index, name=columns)
if index_mask is not None:
sr = sr.loc[index_mask]
if tz_localize is not None:
sr = sr.tz_localize(tz_localize)
return sr
df = pd.DataFrame(a, index=index, columns=columns)
if index_mask is not None:
df = df.loc[index_mask]
if column_mask is not None:
df = df.loc[:, column_mask]
if tz_localize is not None:
df = df.tz_localize(tz_localize)
return df
def update_symbol(self, symbol, n=1, **kwargs):
download_kwargs = self.select_symbol_kwargs(symbol, self.download_kwargs)
download_kwargs['start_date'] = self.data[symbol].index[-1]
shape = download_kwargs.pop('shape', (5, 3))
new_shape = (n, shape[1]) if len(shape) > 1 else (n,)
new_seed = download_kwargs.pop('seed', seed) + 1
kwargs = merge_dicts(download_kwargs, kwargs)
return self.download_symbol(symbol, shape=new_shape, seed=new_seed, **kwargs)
class TestData:
def test_config(self, tmp_path):
data = MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2'])
assert MyData.loads(data.dumps()) == data
data.save(tmp_path / 'data')
assert MyData.load(tmp_path / 'data') == data
def test_download(self):
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,), return_arr=True).data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.15601864044243652
]
)
)
pd.testing.assert_frame_equal(
MyData.download(0, shape=(5, 3), return_arr=True).data[0],
pd.DataFrame(
[
[0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
[0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
[0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
[0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
[0.8324426408004217, 0.21233911067827616, 0.18182496720710062]
]
)
)
index = pd.DatetimeIndex(
['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'],
dtype='datetime64[ns]',
freq='D'
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,)).data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.15601864044243652
],
index=index
)
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,), columns='feat0').data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.15601864044243652
],
index=index,
name='feat0'
)
)
pd.testing.assert_frame_equal(
MyData.download(0, shape=(5, 3)).data[0],
pd.DataFrame(
[
[0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
[0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
[0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
[0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
[0.8324426408004217, 0.21233911067827616, 0.18182496720710062]
],
index=index
)
)
pd.testing.assert_frame_equal(
MyData.download(0, shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).data[0],
pd.DataFrame(
[
[0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
[0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
[0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
[0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
[0.8324426408004217, 0.21233911067827616, 0.18182496720710062]
],
index=index,
columns=pd.Index(['feat0', 'feat1', 'feat2'], dtype='object'))
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,)).data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.15601864044243652
],
index=index
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,)).data[1],
pd.Series(
[
1.3745401188473625,
1.9507143064099162,
1.7319939418114051,
1.5986584841970366,
1.15601864044243652
],
index=index
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3)).data[0],
pd.DataFrame(
[
[0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
[0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
[0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
[0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
[0.8324426408004217, 0.21233911067827616, 0.18182496720710062]
],
index=index
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3)).data[1],
pd.DataFrame(
[
[1.3745401188473625, 1.9507143064099162, 1.7319939418114051],
[1.5986584841970366, 1.15601864044243652, 1.15599452033620265],
[1.05808361216819946, 1.8661761457749352, 1.6011150117432088],
[1.7080725777960455, 1.020584494295802447, 1.9699098521619943],
[1.8324426408004217, 1.21233911067827616, 1.18182496720710062]
],
index=index
)
)
tzaware_index = pd.DatetimeIndex(
[
'2020-01-01 01:00:00',
'2020-01-02 01:00:00',
'2020-01-03 01:00:00',
'2020-01-04 01:00:00',
'2020-01-05 01:00:00'
],
dtype='datetime64[ns, Europe/Berlin]',
freq='D'
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,), tz_localize='UTC', tz_convert='Europe/Berlin').data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.15601864044243652
],
index=tzaware_index
)
)
index_mask = vbt.symbol_dict({
0: [False, True, True, True, True],
1: [True, True, True, True, False]
})
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='nan').data[0],
pd.Series(
[
np.nan,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.15601864044243652
],
index=index
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='nan').data[1],
pd.Series(
[
1.3745401188473625,
1.9507143064099162,
1.7319939418114051,
1.5986584841970366,
np.nan
],
index=index
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='drop').data[0],
pd.Series(
[
0.9507143064099162,
0.7319939418114051,
0.5986584841970366
],
index=index[1:4]
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='drop').data[1],
pd.Series(
[
1.9507143064099162,
1.7319939418114051,
1.5986584841970366
],
index=index[1:4]
)
)
column_mask = vbt.symbol_dict({
0: [False, True, True],
1: [True, True, False]
})
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='nan', missing_columns='nan').data[0],
pd.DataFrame(
[
[np.nan, np.nan, np.nan],
[np.nan, 0.15601864044243652, 0.15599452033620265],
[np.nan, 0.8661761457749352, 0.6011150117432088],
[np.nan, 0.020584494295802447, 0.9699098521619943],
[np.nan, 0.21233911067827616, 0.18182496720710062]
],
index=index
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='nan', missing_columns='nan').data[1],
pd.DataFrame(
[
[1.3745401188473625, 1.9507143064099162, np.nan],
[1.5986584841970366, 1.15601864044243652, np.nan],
[1.05808361216819946, 1.8661761457749352, np.nan],
[1.7080725777960455, 1.020584494295802447, np.nan],
[np.nan, np.nan, np.nan]
],
index=index
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='drop', missing_columns='drop').data[0],
pd.DataFrame(
[
[0.15601864044243652],
[0.8661761457749352],
[0.020584494295802447]
],
index=index[1:4],
columns=pd.Int64Index([1], dtype='int64')
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='drop', missing_columns='drop').data[1],
pd.DataFrame(
[
[1.15601864044243652],
[1.8661761457749352],
[1.020584494295802447]
],
index=index[1:4],
columns=pd.Int64Index([1], dtype='int64')
)
)
with pytest.raises(Exception) as e_info:
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='raise', missing_columns='nan')
with pytest.raises(Exception) as e_info:
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='nan', missing_columns='raise')
with pytest.raises(Exception) as e_info:
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='test', missing_columns='nan')
with pytest.raises(Exception) as e_info:
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='nan', missing_columns='test')
def test_update(self):
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,), return_arr=True).update().data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.11505456638977896
]
)
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,), return_arr=True).update(n=2).data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.11505456638977896,
0.6090665392794814
]
)
)
pd.testing.assert_frame_equal(
MyData.download(0, shape=(5, 3), return_arr=True).update().data[0],
pd.DataFrame(
[
[0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
[0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
[0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
[0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
[0.11505456638977896, 0.6090665392794814, 0.13339096418598828]
]
)
)
pd.testing.assert_frame_equal(
MyData.download(0, shape=(5, 3), return_arr=True).update(n=2).data[0],
pd.DataFrame(
[
[0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
[0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
[0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
[0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
[0.11505456638977896, 0.6090665392794814, 0.13339096418598828],
[0.24058961996534878, 0.3271390558111398, 0.8591374909485977]
]
)
)
index = pd.DatetimeIndex(
['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'],
dtype='datetime64[ns]',
freq='D'
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,)).update().data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.11505456638977896
],
index=index
)
)
index2 = pd.DatetimeIndex(
['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05', '2020-01-06'],
dtype='datetime64[ns]',
freq='D'
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,)).update(n=2).data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.11505456638977896,
0.6090665392794814
],
index=index2
)
)
tzaware_index = pd.DatetimeIndex(
[
'2020-01-01 01:00:00',
'2020-01-02 01:00:00',
'2020-01-03 01:00:00',
'2020-01-04 01:00:00',
'2020-01-05 01:00:00'
],
dtype='datetime64[ns, Europe/Berlin]',
freq='D'
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,), tz_localize='UTC', tz_convert='Europe/Berlin')
.update(tz_localize=None).data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.11505456638977896
],
index=tzaware_index
)
)
index_mask = vbt.symbol_dict({
0: [False, True, True, True, True],
1: [True, True, True, True, False]
})
update_index_mask = vbt.symbol_dict({
0: [True],
1: [False]
})
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='nan')
.update(index_mask=update_index_mask).data[0],
pd.Series(
[
np.nan,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.11505456638977896
],
index=index
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='nan')
.update(index_mask=update_index_mask).data[1],
pd.Series(
[
1.3745401188473625,
1.9507143064099162,
1.7319939418114051,
1.5986584841970366,
np.nan
],
index=index
)
)
update_index_mask2 = vbt.symbol_dict({
0: [True, False],
1: [False, True]
})
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='nan')
.update(n=2, index_mask=update_index_mask2).data[0],
pd.Series(
[
np.nan,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.11505456638977896,
np.nan
],
index=index2
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='nan')
.update(n=2, index_mask=update_index_mask2).data[1],
pd.Series(
[
1.3745401188473625,
1.9507143064099162,
1.7319939418114051,
1.5986584841970366,
np.nan,
1.6090665392794814
],
index=index2
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='drop')
.update(index_mask=update_index_mask).data[0],
pd.Series(
[
0.9507143064099162,
0.7319939418114051,
0.5986584841970366
],
index=index[1:4]
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='drop')
.update(index_mask=update_index_mask).data[1],
pd.Series(
[
1.9507143064099162,
1.7319939418114051,
1.5986584841970366
],
index=index[1:4]
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='drop')
.update(n=2, index_mask=update_index_mask2).data[0],
pd.Series(
[
0.9507143064099162,
0.7319939418114051,
0.5986584841970366
],
index=index[1:4]
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='drop')
.update(n=2, index_mask=update_index_mask2).data[1],
pd.Series(
[
1.9507143064099162,
1.7319939418114051,
1.5986584841970366
],
index=index[1:4]
)
)
column_mask = vbt.symbol_dict({
0: [False, True, True],
1: [True, True, False]
})
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='nan', missing_columns='nan')
.update(index_mask=update_index_mask).data[0],
pd.DataFrame(
[
[np.nan, np.nan, np.nan],
[np.nan, 0.15601864044243652, 0.15599452033620265],
[np.nan, 0.8661761457749352, 0.6011150117432088],
[np.nan, 0.020584494295802447, 0.9699098521619943],
[np.nan, 0.6090665392794814, 0.13339096418598828]
],
index=index
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='nan', missing_columns='nan')
.update(index_mask=update_index_mask).data[1],
pd.DataFrame(
[
[1.3745401188473625, 1.9507143064099162, np.nan],
[1.5986584841970366, 1.15601864044243652, np.nan],
[1.05808361216819946, 1.8661761457749352, np.nan],
[1.7080725777960455, 1.020584494295802447, np.nan],
[np.nan, np.nan, np.nan]
],
index=index
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='nan', missing_columns='nan')
.update(n=2, index_mask=update_index_mask2).data[0],
pd.DataFrame(
[
[np.nan, np.nan, np.nan],
[np.nan, 0.15601864044243652, 0.15599452033620265],
[np.nan, 0.8661761457749352, 0.6011150117432088],
[np.nan, 0.020584494295802447, 0.9699098521619943],
[np.nan, 0.6090665392794814, 0.13339096418598828],
[np.nan, np.nan, np.nan]
],
index=index2
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='nan', missing_columns='nan')
.update(n=2, index_mask=update_index_mask2).data[1],
pd.DataFrame(
[
[1.3745401188473625, 1.9507143064099162, np.nan],
[1.5986584841970366, 1.15601864044243652, np.nan],
[1.05808361216819946, 1.8661761457749352, np.nan],
[1.7080725777960455, 1.020584494295802447, np.nan],
[np.nan, np.nan, np.nan],
[1.2405896199653488, 1.3271390558111398, np.nan]
],
index=index2
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='drop', missing_columns='drop')
.update(index_mask=update_index_mask).data[0],
pd.DataFrame(
[
[0.15601864044243652],
[0.8661761457749352],
[0.020584494295802447]
],
index=index[1:4],
columns=pd.Int64Index([1], dtype='int64')
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='drop', missing_columns='drop')
.update(index_mask=update_index_mask).data[1],
pd.DataFrame(
[
[1.15601864044243652],
[1.8661761457749352],
[1.020584494295802447]
],
index=index[1:4],
columns=pd.Int64Index([1], dtype='int64')
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='drop', missing_columns='drop')
.update(n=2, index_mask=update_index_mask2).data[0],
pd.DataFrame(
[
[0.15601864044243652],
[0.8661761457749352],
[0.020584494295802447]
],
index=index[1:4],
columns=pd.Int64Index([1], dtype='int64')
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='drop', missing_columns='drop')
.update(n=2, index_mask=update_index_mask2).data[1],
pd.DataFrame(
[
[1.15601864044243652],
[1.8661761457749352],
[1.020584494295802447]
],
index=index[1:4],
columns=pd.Int64Index([1], dtype='int64')
)
)
def test_concat(self):
index = pd.DatetimeIndex(
['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'],
dtype='datetime64[ns]',
freq='D'
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,), columns='feat0').concat()['feat0'],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.15601864044243652
],
index=index,
name=0
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5,), columns='feat0').concat()['feat0'],
pd.DataFrame(
[
[0.3745401188473625, 1.3745401188473625],
[0.9507143064099162, 1.9507143064099162],
[0.7319939418114051, 1.7319939418114051],
[0.5986584841970366, 1.5986584841970366],
[0.15601864044243652, 1.15601864044243652]
],
index=index,
columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
)
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).concat()['feat0'],
pd.Series(
[
0.3745401188473625,
0.5986584841970366,
0.05808361216819946,
0.7080725777960455,
0.8324426408004217
],
index=index,
name=0
)
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).concat()['feat1'],
pd.Series(
[
0.9507143064099162,
0.15601864044243652,
0.8661761457749352,
0.020584494295802447,
0.21233911067827616
],
index=index,
name=0
)
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).concat()['feat2'],
pd.Series(
[
0.7319939418114051,
0.15599452033620265,
0.6011150117432088,
0.9699098521619943,
0.18182496720710062
],
index=index,
name=0
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).concat()['feat0'],
pd.DataFrame(
[
[0.3745401188473625, 1.3745401188473625],
[0.5986584841970366, 1.5986584841970366],
[0.05808361216819946, 1.05808361216819946],
[0.7080725777960455, 1.7080725777960455],
[0.8324426408004217, 1.8324426408004217]
],
index=index,
columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).concat()['feat1'],
pd.DataFrame(
[
[0.9507143064099162, 1.9507143064099162],
[0.15601864044243652, 1.15601864044243652],
[0.8661761457749352, 1.8661761457749352],
[0.020584494295802447, 1.020584494295802447],
[0.21233911067827616, 1.21233911067827616]
],
index=index,
columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).concat()['feat2'],
pd.DataFrame(
[
[0.7319939418114051, 1.7319939418114051],
[0.15599452033620265, 1.15599452033620265],
[0.6011150117432088, 1.6011150117432088],
[0.9699098521619943, 1.9699098521619943],
[0.18182496720710062, 1.18182496720710062]
],
index=index,
columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
)
)
def test_get(self):
index = pd.DatetimeIndex(
['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'],
dtype='datetime64[ns]',
freq='D'
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,), columns='feat0').get(),
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.15601864044243652
],
index=index,
name='feat0'
)
)
pd.testing.assert_frame_equal(
MyData.download(0, shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).get(),
pd.DataFrame(
[
[0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
[0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
[0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
[0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
[0.8324426408004217, 0.21233911067827616, 0.18182496720710062]
],
index=index,
columns=pd.Index(['feat0', 'feat1', 'feat2'], dtype='object')
)
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).get('feat0'),
pd.Series(
[
0.3745401188473625,
0.5986584841970366,
0.05808361216819946,
0.7080725777960455,
0.8324426408004217
],
index=index,
name='feat0'
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5,), columns='feat0').get(),
pd.DataFrame(
[
[0.3745401188473625, 1.3745401188473625],
[0.9507143064099162, 1.9507143064099162],
[0.7319939418114051, 1.7319939418114051],
[0.5986584841970366, 1.5986584841970366],
[0.15601864044243652, 1.15601864044243652]
],
index=index,
columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).get('feat0'),
pd.DataFrame(
[
[0.3745401188473625, 1.3745401188473625],
[0.5986584841970366, 1.5986584841970366],
[0.05808361216819946, 1.05808361216819946],
[0.7080725777960455, 1.7080725777960455],
[0.8324426408004217, 1.8324426408004217]
],
index=index,
columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).get(['feat0', 'feat1'])[0],
pd.DataFrame(
[
[0.3745401188473625, 1.3745401188473625],
[0.5986584841970366, 1.5986584841970366],
[0.05808361216819946, 1.05808361216819946],
[0.7080725777960455, 1.7080725777960455],
[0.8324426408004217, 1.8324426408004217]
],
index=index,
columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).get()[0],
pd.DataFrame(
[
[0.3745401188473625, 1.3745401188473625],
[0.5986584841970366, 1.5986584841970366],
[0.05808361216819946, 1.05808361216819946],
[0.7080725777960455, 1.7080725777960455],
[0.8324426408004217, 1.8324426408004217]
],
index=index,
columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')  # api: pandas.Int64Index
from __future__ import division
import pandas as pd
import numpy as np
import os, sys, csv
def aggregate(outputdir, metadatadir, resourcesdir):
# get list of normalized files
normalized_files = os.listdir(outputdir)
normalized_files = [f for f in normalized_files if 'normalized' in f]
# read normalized data into single df
df_aggregated = pd.concat([pd.read_csv(os.path.join(outputdir, f)) for f in normalized_files])
# add extra columns from selleck_plate_mappings
selleck_plate_mappings = pd.read_csv(os.path.join(metadatadir, 'selleck_plate_mappings.csv'))
df_aggregated = pd.merge(df_aggregated, selleck_plate_mappings, on='plate', how='left')
# add compound annotations
compound_annotations = pd.read_csv(os.path.join(resourcesdir, 'selleck_plates','Selleck_library_compound_annotations.csv'))
df_aggregated_with_annots = pd.merge(df_aggregated, compound_annotations, on=['selleck_plate','well'], how='left')
# save aggregated df
df_aggregated.to_csv(os.path.join(outputdir, "aggregated_welldata.csv"), index=False)
df_aggregated_with_annots.to_csv(os.path.join(outputdir, "aggregated_welldata_with_annots.csv"), index=False)
# drop run column
df_aggregated = df_aggregated.drop(['run'], axis=1)
# group data by timepoints (6h, 24h), averaging the runs together
df_grouped = df_aggregated.groupby(['selleck_plate', 'well', 'well_type', 'timepoint']).mean().reset_index()
df_grouped_with_annots = pd.merge(df_grouped, compound_annotations, on=['selleck_plate', 'well'], how='left')  # api: pandas.merge
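# Usage sketch for the aggregation step above; the directory names below are
# placeholders, not paths from the original pipeline.
if __name__ == '__main__':
    aggregate(
        outputdir='output',        # folder holding the *normalized* CSV files
        metadatadir='metadata',    # folder holding selleck_plate_mappings.csv
        resourcesdir='resources',  # folder holding selleck_plates/ annotations
    )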
import cgi
import copy
import io
import json
import logging
import os
import tempfile
import threading
import time
import uuid
import math
import pandas as pd
from os.path import basename
from pathlib import Path
from urllib.parse import urlparse
import boto3
import cv2
import ffmpeg
import requests
from azure.storage.blob import BlobServiceClient
from google.cloud import storage
from .. import common
from ..api import API
from ..exceptions import (
SABaseException, SAExistingProjectNameException,
SANonExistingProjectNameException
)
from .annotation_classes import (
check_annotation_json, create_annotation_classes_from_classes_json,
fill_class_and_attribute_ids, get_annotation_classes_name_to_id,
search_annotation_classes
)
from .images import get_image_metadata, search_images, search_images_all_folders, get_project_root_folder_id
from .project_api import (
get_project_and_folder_metadata, get_project_metadata_bare,
get_project_metadata_with_users
)
from .users import get_team_contributor_metadata
from .utils import _get_upload_auth_token, _get_boto_session_by_credentials, _upload_images, _attach_urls
from tqdm import trange
from tqdm import tqdm
from ..mixp.decorators import Trackable
_NUM_THREADS = 10
_TIME_TO_UPDATE_IN_TQDM = 1
logger = logging.getLogger("superannotate-python-sdk")
_api = API.get_instance()
@Trackable
def create_project(project_name, project_description, project_type):
"""Create a new project in the team.
:param project_name: the new project's name
:type project_name: str
:param project_description: the new project's description
:type project_description: str
:param project_type: the new project type, Vector or Pixel.
:type project_type: str
:return: dict object metadata the new project
:rtype: dict
"""
try:
get_project_metadata_bare(project_name)
except SANonExistingProjectNameException:
pass
else:
raise SAExistingProjectNameException(
0, "Project with name " + project_name +
" already exists. Please use unique names for projects to use with SDK."
)
project_type = common.project_type_str_to_int(project_type)
if len(
set(project_name).intersection(
common.SPECIAL_CHARACTERS_IN_PROJECT_FOLDER_NAMES
)
) > 0:
logger.warning(
"New project name has special characters. Special characters will be replaced by underscores."
)
data = {
"team_id": str(_api.team_id),
"name": project_name,
"description": project_description,
"status": 0,
"type": project_type
}
response = _api.send_request(
req_type='POST', path='/project', json_req=data
)
if not response.ok:
raise SABaseException(
response.status_code, "Couldn't create project " + response.text
)
res = response.json()
logger.info(
"Created project %s (ID %s) with type %s", res["name"], res["id"],
common.project_type_int_to_str(res["type"])
)
res["type"] = common.project_type_int_to_str(res["type"])
return res
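# Usage sketch for create_project (the project name and description are placeholders):
def _example_create_project():
    metadata = create_project("Example Project", "Created via the SDK", "Vector")
    return metadata["id"]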
@Trackable
def create_project_from_metadata(project_metadata):
"""Create a new project in the team using project metadata object dict.
Mandatory keys in project_metadata are "name", "description" and "type" (Vector or Pixel)
Non-mandatory keys: "workflow", "contributors", "settings" and "annotation_classes".
:return: dict object metadata the new project
:rtype: dict
"""
new_project_metadata = create_project(
project_metadata["name"], project_metadata["description"],
project_metadata["type"]
)
if "contributors" in project_metadata:
for user in project_metadata["contributors"]:
share_project(
new_project_metadata, user["user_id"], user["user_role"]
)
if "settings" in project_metadata:
set_project_settings(new_project_metadata, project_metadata["settings"])
if "annotation_classes" in project_metadata:
create_annotation_classes_from_classes_json(
new_project_metadata, project_metadata["annotation_classes"]
)
if "workflow" in project_metadata:
set_project_workflow(new_project_metadata, project_metadata["workflow"])
return new_project_metadata
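# Minimal metadata sketch for create_project_from_metadata: only the mandatory
# keys are shown; all values below are placeholders.
def _example_create_project_from_metadata():
    return create_project_from_metadata({
        "name": "Example Project",
        "description": "Created from a metadata dict",
        "type": "Vector",
    })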
@Trackable
def delete_project(project):
"""Deletes the project
:param project: project name or folder path (e.g., "project1/folder1")
:type project: str
"""
if not isinstance(project, dict):
project = get_project_metadata_bare(project)
team_id, project_id = project["team_id"], project["id"]
params = {"team_id": team_id}
response = _api.send_request(
req_type='DELETE', path=f'/project/{project_id}', params=params
)
if not response.ok:
raise SABaseException(
response.status_code, "Couldn't delete project " + response.text
)
logger.info("Successfully deleted project %s.", project["name"])
@Trackable
def rename_project(project, new_name):
"""Renames the project
:param project: project name or folder path (e.g., "project1/folder1")
:type project: str
:param new_name: project's new name
:type new_name: str
"""
try:
get_project_metadata_bare(new_name)
except SANonExistingProjectNameException:
pass
else:
raise SAExistingProjectNameException(
0, "Project with name " + new_name +
" already exists. Please use unique names for projects to use with SDK."
)
if not isinstance(project, dict):
project = get_project_metadata_bare(project)
team_id, project_id = project["team_id"], project["id"]
params = {"team_id": team_id}
json_req = {"name": new_name}
response = _api.send_request(
req_type='PUT',
path=f'/project/{project_id}',
params=params,
json_req=json_req
)
if not response.ok:
raise SABaseException(
response.status_code, "Couldn't rename project " + response.text
)
logger.info(
"Successfully renamed project %s to %s.", project["name"], new_name
)
@Trackable
def get_project_image_count(project, with_all_subfolders=False):
"""Returns number of images in the project.
:param project: project name or folder path (e.g., "project1/folder1")
:type project: str
:param with_all_subfolders: enables recursive folder counting
:type with_all_subfolders: bool
:return: number of images in the project
:rtype: int
"""
if not with_all_subfolders:
return len(search_images(project))
else:
return len(search_images_all_folders(project))
def _get_target_frames_count(video_path, target_fps):
"""
Get video frames count
"""
# deprecated: because cv2.CAP_PROP_FPS is not reliable
video = cv2.VideoCapture(str(video_path), cv2.CAP_FFMPEG)
total_num_of_frames = 0
flag = True
while flag:
flag, _ = video.read()
if flag:
total_num_of_frames += 1
else:
break
return math.ceil(
(total_num_of_frames * target_fps) / video.get(cv2.CAP_PROP_FPS)
)
def _get_video_frames_count(video_path):
"""
Get video frames count
"""
video = cv2.VideoCapture(str(video_path), cv2.CAP_FFMPEG)
total_num_of_frames = 0
flag = True
while flag:
flag, _ = video.read()
if flag:
total_num_of_frames += 1
else:
break
return total_num_of_frames
def _get_video_fps_ration(target_fps, video, ratio, log):
"""
Get video fps / target fps ratio
"""
video_fps = float(video.get(cv2.CAP_PROP_FPS))
if target_fps >= video_fps:
if log:
logger.warning(
"Video frame rate %s smaller than target frame rate %s. Cannot change frame rate.",
video_fps, target_fps
)
else:
if log:
logger.info(
"Changing video frame rate from %s to target frame rate %s.",
video_fps, target_fps
)
ratio = video_fps / target_fps
return ratio
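# Worked example of the ratio above: for a 30 fps source and target_fps=5,
# ratio = 30 / 5 = 6.0, so _extract_frames_from_video keeps roughly every
# sixth frame via its round(frame_no_with_change) check. If target_fps is at
# least the source fps, the ratio stays at its default of 1.0 and every frame
# is kept.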
def _get_available_image_counts(project, folder):
if folder:
folder_id = folder["id"]
else:
folder_id = get_project_root_folder_id(project)
params = {'team_id': project['team_id'], 'folder_id': folder_id}
res = _get_upload_auth_token(params=params, project_id=project['id'])
return res['availableImageCount']
def _get_video_rotate_code(video_path, log=True):
rotate_code = None
try:
cv2_rotations = {
90: cv2.ROTATE_90_CLOCKWISE,
180: cv2.ROTATE_180,
270: cv2.ROTATE_90_COUNTERCLOCKWISE,
}
meta_dict = ffmpeg.probe(str(video_path))
rot = int(meta_dict['streams'][0]['tags']['rotate'])
rotate_code = cv2_rotations[rot]
if rot != 0 and log:
logger.info(
"Frame rotation of %s found. Output images will be rotated accordingly.",
rot
)
except Exception as e:
warning_str = ""
if "ffprobe" in str(e) and log:
warning_str = "This could be because ffmpeg package is not installed. To install it, run: sudo apt install ffmpeg"
if log:
logger.warning(
"Couldn't read video metadata to determine rotation. %s",
warning_str
)
return rotate_code
def _extract_frames_from_video(
start_time, end_time, video_path, tempdir, limit, target_fps, chunk_size,
save
):
video = cv2.VideoCapture(str(video_path), cv2.CAP_FFMPEG)
if not video.isOpened():
raise SABaseException(0, "Couldn't open video file " + str(video_path))
total_num_of_frames = _get_video_frames_count(video_path)
if save:
logger.info("Video frame count is %s.", total_num_of_frames)
ratio = 1.0
if target_fps:
ratio = _get_video_fps_ration(target_fps, video, ratio, log=save)
rotate_code = _get_video_rotate_code(video_path, log=save)
video_name = Path(video_path).stem
frame_no = 0
frame_no_with_change = 1.0
extracted_frame_no = 1
if save:
logger.info("Extracting frames from video to %s.", tempdir.name)
zero_fill_count = len(str(total_num_of_frames))
extracted_frames_paths = []
all_paths = []
while len(all_paths) < limit:
success, frame = video.read()
if not success:
break
frame_no += 1
if round(frame_no_with_change) != frame_no:
continue
frame_no_with_change += ratio
frame_time = video.get(cv2.CAP_PROP_POS_MSEC) / 1000.0
if end_time and frame_time > end_time:
break
if frame_time < start_time:
continue
if rotate_code:
frame = cv2.rotate(frame, rotate_code)
path = str(
Path(tempdir.name) / (
video_name + "_" +
str(extracted_frame_no).zfill(zero_fill_count) + ".jpg"
)
)
if save:
cv2.imwrite(path, frame)
extracted_frames_paths.append(path)
extracted_frame_no += 1
if len(extracted_frames_paths) % chunk_size == 0:
q = extracted_frames_paths
all_paths += extracted_frames_paths
extracted_frames_paths = []
yield q
if extracted_frames_paths:
q = extracted_frames_paths
all_paths += extracted_frames_paths
extracted_frames_paths = []
yield q
return extracted_frames_paths
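# Consumption sketch for the generator above: frame paths are yielded in
# chunks of chunk_size; the file name, fps and limit below are placeholders.
def _example_extract_frames(video_path="clip.mp4"):
    tempdir = tempfile.TemporaryDirectory()
    frame_paths = []
    for chunk in _extract_frames_from_video(
        start_time=0.0, end_time=None, video_path=video_path, tempdir=tempdir,
        limit=1000, target_fps=2.0, chunk_size=100, save=True
    ):
        frame_paths.extend(chunk)
    return frame_paths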
@Trackable
def upload_video_to_project(
project,
video_path,
target_fps=None,
start_time=0.0,
end_time=None,
annotation_status="NotStarted",
image_quality_in_editor=None
):
"""Uploads image frames from video to platform. Uploaded images will have
names "<video_name>_<frame_no>.jpg".
:param project: project name or folder path (e.g., "project1/folder1")
:type project: str
:param video_path: video to upload
:type video_path: Pathlike (str or Path)
:param target_fps: how many frames per second to extract from the video (approximate).
If None, all frames will be uploaded
:type target_fps: float
:param start_time: Time (in seconds) from which to start extracting frames
:type start_time: float
:param end_time: Time (in seconds) up to which to extract frames. If None, frames are extracted up to the end of the video
:type end_time: float
:param annotation_status: value to set the annotation statuses of the uploaded
video frames NotStarted InProgress QualityCheck Returned Completed Skipped
:type annotation_status: str
:param image_quality_in_editor: image quality be seen in SuperAnnotate web annotation editor.
Can be either "compressed" or "original". If None then the default value in project settings will be used.
:type image_quality_in_editor: str
:return: filenames of uploaded images
:rtype: list of strs
"""
project, folder = get_project_and_folder_metadata(project)
if folder:
folder_id = folder["id"]
else:
folder_id = get_project_root_folder_id(project)
if image_quality_in_editor is None:
image_quality_in_editor = get_project_default_image_quality_in_editor(
project
)
limit = _get_available_image_counts(project, folder)
upload_state = common.upload_state_int_to_str(project.get("upload_state"))
if upload_state == "External":
raise SABaseException(
0,
"The function does not support projects containing images attached with URLs"
)
logger.info("Uploading from video %s.", str(video_path))
tempdir = tempfile.TemporaryDirectory()
upload_file_names = []
chunk_size = 100
all_frames_count = 0
for frames_path in _extract_frames_from_video(
start_time, end_time, video_path, tempdir, limit, target_fps,
chunk_size, False
):
all_frames_count += len(frames_path)
pbar = None
for _ in _extract_frames_from_video(
start_time,
end_time,
video_path,
tempdir,
limit,
target_fps,
chunk_size,
save=True
):
if not pbar:
pbar = tqdm(total=all_frames_count)
files = os.listdir(tempdir.name)
image_paths = [f'{tempdir.name}/{f}' for f in files]
uploaded = _upload_images(
img_paths=image_paths,
team_id=project['team_id'],
folder_id=folder_id,
project_id=project['id'],
annotation_status=common.
annotation_status_str_to_int(annotation_status),
image_quality_in_editor=image_quality_in_editor,
folder_name="folder_name",
project=project,
from_s3_bucket=None,
disable_loading=True
)
for path in image_paths:
os.remove(path)
upload_file_names += [Path(f).name for f in uploaded[0]]
pbar.update(len(uploaded[0]))
if pbar:
pbar.close()
return upload_file_names
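# Usage sketch for upload_video_to_project (the project path, file name and
# fps below are placeholders):
def _example_upload_video():
    return upload_video_to_project(
        "Example Project/folder1", "videos/clip.mp4",
        target_fps=1.0, annotation_status="InProgress"
    )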
@Trackable
def upload_videos_from_folder_to_project(
project,
folder_path,
extensions=common.DEFAULT_VIDEO_EXTENSIONS,
exclude_file_patterns=(),
recursive_subfolders=False,
target_fps=None,
start_time=0.0,
end_time=None,
annotation_status="NotStarted",
image_quality_in_editor=None
):
"""Uploads image frames from all videos with given extensions from folder_path to the project.
Sets status of all the uploaded images to set_status if it is not None.
:param project: project name or folder path (e.g., "project1/folder1")
:type project: str
:param folder_path: from which folder to upload the videos
:type folder_path: Pathlike (str or Path)
:param extensions: tuple or list of filename extensions to include from folder
:type extensions: tuple or list of strs
:param exclude_file_patterns: filename patterns to exclude from uploading
:type exclude_file_patterns: listlike of strs
:param recursive_subfolders: enable recursive subfolder parsing
:type recursive_subfolders: bool
:param target_fps: how many frames per second to extract from the video (approximate).
If None, all frames will be uploaded
:type target_fps: float
:param start_time: Time (in seconds) from which to start extracting frames
:type start_time: float
:param end_time: Time (in seconds) up to which to extract frames. If None, frames are extracted up to the end of the video
:type end_time: float
:param annotation_status: value to set the annotation statuses of the uploaded images NotStarted InProgress QualityCheck Returned Completed Skipped
:type annotation_status: str
:param image_quality_in_editor: image quality be seen in SuperAnnotate web annotation editor.
Can be either "compressed" or "original". If None then the default value in project settings will be used.
:type image_quality_in_editor: str
:return: uploaded and not-uploaded video frame images' filenames
:rtype: tuple of list of strs
"""
project, folder = get_project_and_folder_metadata(project)
upload_state = common.upload_state_int_to_str(project.get("upload_state"))
if upload_state == "External":
raise SABaseException(
0,
"The function does not support projects containing images attached with URLs"
)
if recursive_subfolders:
logger.warning(
"When using recursive subfolder parsing same name videos in different subfolders will overwrite each other."
)
if not isinstance(extensions, (list, tuple)):
raise SABaseException(
0,
"extensions should be a list or a tuple in upload_images_from_folder_to_project"
)
logger.info(
"Uploading all videos with extensions %s from %s to project %s. Excluded file patterns are: %s.",
extensions, folder_path, project["name"], exclude_file_patterns
)
paths = []
for extension in extensions:
if not recursive_subfolders:
paths += list(Path(folder_path).glob(f'*.{extension.lower()}'))
if os.name != "nt":
paths += list(Path(folder_path).glob(f'*.{extension.upper()}'))
else:
paths += list(Path(folder_path).rglob(f'*.{extension.lower()}'))
if os.name != "nt":
paths += list(Path(folder_path).rglob(f'*.{extension.upper()}'))
filtered_paths = []
for path in paths:
not_in_exclude_list = [
x not in Path(path).name for x in exclude_file_patterns
]
if all(not_in_exclude_list):
filtered_paths.append(path)
filenames = []
for path in filtered_paths:
filenames += upload_video_to_project(
(project, folder),
path,
target_fps=target_fps,
start_time=start_time,
end_time=end_time,
annotation_status=annotation_status,
image_quality_in_editor=image_quality_in_editor
)
return filenames
@Trackable
def upload_images_from_folder_to_project(
project,
folder_path,
extensions=common.DEFAULT_IMAGE_EXTENSIONS,
annotation_status="NotStarted",
from_s3_bucket=None,
exclude_file_patterns=common.DEFAULT_FILE_EXCLUDE_PATTERNS,
recursive_subfolders=False,
image_quality_in_editor=None
):
"""Uploads all images with given extensions from folder_path to the project.
Sets status of all the uploaded images to set_status if it is not None.
If an image with existing name already exists in the project it won't be uploaded,
and its path will be appended to the third member of return value of this
function.
:param project: project name or folder path (e.g., "project1/folder1")
:type project: str
:param folder_path: from which folder to upload the images
:type folder_path: Pathlike (str or Path)
:param extensions: tuple or list of filename extensions to include from folder
:type extensions: tuple or list of strs
:param annotation_status: value to set the annotation statuses of the uploaded images NotStarted InProgress QualityCheck Returned Completed Skipped
:type annotation_status: str
:param from_s3_bucket: AWS S3 bucket to use. If None then folder_path is in local filesystem
:type from_s3_bucket: str
:param exclude_file_patterns: filename patterns to exclude from uploading,
default value is to exclude SuperAnnotate export related ["___save.png", "___fuse.png"]
:type exclude_file_patterns: list or tuple of strs
:param recursive_subfolders: enable recursive subfolder parsing
:type recursive_subfolders: bool
:param image_quality_in_editor: image quality be seen in SuperAnnotate web annotation editor.
Can be either "compressed" or "original". If None then the default value in project settings will be used.
:type image_quality_in_editor: str
:return: uploaded, could-not-upload, existing-images filepaths
:rtype: tuple (3 members) of list of strs
"""
project, project_folder = get_project_and_folder_metadata(project)
project_folder_name = project["name"] + (
f'/{project_folder["name"]}' if project_folder else ""
)
upload_state = common.upload_state_int_to_str(project.get("upload_state"))
if upload_state == "External":
raise SABaseException(
0,
"The function does not support projects containing images attached with URLs"
)
if recursive_subfolders:
logger.info(
"When using recursive subfolder parsing same name images in different subfolders will overwrite each other."
)
if not isinstance(extensions, (list, tuple)):
raise SABaseException(
0,
"extensions should be a list or a tuple in upload_images_from_folder_to_project"
)
logger.info(
"Uploading all images with extensions %s from %s to project %s. Excluded file patterns are: %s.",
extensions, folder_path, project_folder_name, exclude_file_patterns
)
if from_s3_bucket is None:
paths = []
for extension in extensions:
if not recursive_subfolders:
paths += list(Path(folder_path).glob(f'*.{extension.lower()}'))
if os.name != "nt":
paths += list(
Path(folder_path).glob(f'*.{extension.upper()}')
)
else:
paths += list(Path(folder_path).rglob(f'*.{extension.lower()}'))
if os.name != "nt":
paths += list(
Path(folder_path).rglob(f'*.{extension.upper()}')
)
else:
s3_client = boto3.client('s3')
paginator = s3_client.get_paginator('list_objects_v2')
response_iterator = paginator.paginate(
Bucket=from_s3_bucket, Prefix=folder_path
)
paths = []
for response in response_iterator:
for object_data in response['Contents']:
key = object_data['Key']
if not recursive_subfolders and '/' in key[len(folder_path) +
1:]:
continue
for extension in extensions:
if key.endswith(f'.{extension.lower()}'
) or key.endswith(f'.{extension.upper()}'):
paths.append(key)
break
filtered_paths = []
for path in paths:
not_in_exclude_list = [
x not in Path(path).name for x in exclude_file_patterns
]
if all(not_in_exclude_list):
filtered_paths.append(path)
return upload_images_to_project(
(project, project_folder), filtered_paths, annotation_status,
from_s3_bucket, image_quality_in_editor
)
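# Usage sketch for upload_images_from_folder_to_project (the project path and
# local folder below are placeholders):
def _example_upload_folder():
    uploaded, not_uploaded, duplicates = upload_images_from_folder_to_project(
        "Example Project/folder1", "./images",
        annotation_status="QualityCheck", recursive_subfolders=True
    )
    return uploaded, not_uploaded, duplicates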
@Trackable
def upload_images_to_project(
project,
img_paths,
annotation_status="NotStarted",
from_s3_bucket=None,
image_quality_in_editor=None
):
"""Uploads all images given in list of path objects in img_paths to the project.
Sets status of all the uploaded images to set_status if it is not None.
If an image with existing name already exists in the project it won't be uploaded,
and its path will be appended to the third member of return value of this
function.
:param project: project name or folder path (e.g., "project1/folder1")
:type project: str
:param img_paths: list of Pathlike (str or Path) objects to upload
:type img_paths: list
:param annotation_status: value to set the annotation statuses of the uploaded images NotStarted InProgress QualityCheck Returned Completed Skipped
:type annotation_status: str
:param from_s3_bucket: AWS S3 bucket to use. If None then folder_path is in local filesystem
:type from_s3_bucket: str
:param image_quality_in_editor: image quality be seen in SuperAnnotate web annotation editor.
Can be either "compressed" or "original". If None then the default value in project settings will be used.
:type image_quality_in_editor: str
:return: uploaded, could-not-upload, existing-images filepaths
:rtype: tuple (3 members) of list of strs
"""
project, folder = get_project_and_folder_metadata(project)
folder_name = project["name"] + (f'/{folder["name"]}' if folder else "")
upload_state = common.upload_state_int_to_str(project.get("upload_state"))
if upload_state == "External":
raise SABaseException(
0,
"The function does not support projects containing images attached with URLs"
)
if not isinstance(img_paths, list):
raise SABaseException(
0, "img_paths argument to upload_images_to_project should be a list"
)
annotation_status = common.annotation_status_str_to_int(annotation_status)
if image_quality_in_editor is None:
image_quality_in_editor = get_project_default_image_quality_in_editor(
project
)
team_id, project_id = project["team_id"], project["id"]
if folder:
folder_id = folder["id"]
else:
folder_id = get_project_root_folder_id(project)
list_of_uploaded, list_of_not_uploaded, duplicate_images = _upload_images(
img_paths=img_paths,
team_id=team_id,
folder_id=folder_id,
project_id=project_id,
annotation_status=annotation_status,
from_s3_bucket=from_s3_bucket,
image_quality_in_editor=image_quality_in_editor,
project=project,
folder_name=folder_name
)
return (list_of_uploaded, list_of_not_uploaded, duplicate_images)
def _tqdm_download(
total_num, images_to_upload, images_not_uploaded,
duplicate_images_filenames, finish_event
):
with tqdm(total=total_num) as pbar:
while True:
finished = finish_event.wait(1)
if not finished:
sum_all = 0
sum_all += len(images_not_uploaded)
sum_all += len(images_to_upload)
sum_all += len(duplicate_images_filenames)
pbar.update(sum_all - pbar.n)
else:
pbar.update(total_num - pbar.n)
break
@Trackable
def attach_image_urls_to_project(
project, attachments, annotation_status="NotStarted"
):
"""Link images on external storage to SuperAnnotate.
:param project: project name or project folder path
:type project: str or dict
:param attachments: path to csv file on attachments metadata
:type attachments: Pathlike (str or Path)
:param annotation_status: value to set the annotation statuses of the linked images: NotStarted InProgress QualityCheck Returned Completed Skipped
:type annotation_status: str
:return: attached images, failed images, skipped images
:rtype: (list, list, list)
"""
get_project_and_folder_metadata(project)
return attach_file_urls_to_project(project, attachments, annotation_status)
@Trackable
def upload_images_from_public_urls_to_project(
project,
img_urls,
img_names=None,
annotation_status='NotStarted',
image_quality_in_editor=None
):
"""Uploads all images given in the list of URL strings in img_urls to the project.
Sets status of all the uploaded images to annotation_status if it is not None.
:param project: project name or folder path (e.g., "project1/folder1")
:type project: str
:param img_urls: list of str objects to upload
:type img_urls: list
:param img_names: list of str names for each urls in img_url list
:type img_names: list
:param annotation_status: value to set the annotation statuses of the uploaded images NotStarted InProgress QualityCheck Returned Completed Skipped
:type annotation_status: str
:param image_quality_in_editor: image quality be seen in SuperAnnotate web annotation editor.
Can be either "compressed" or "original". If None then the default value in project settings will be used.
:type image_quality_in_editor: str
:return: uploaded images' urls, uploaded images' filenames, duplicate images' filenames and not-uploaded images' urls
:rtype: tuple of list of strs
"""
if img_names is not None and len(img_names) != len(img_urls):
raise SABaseException(0, "Not all image URLs have corresponding names.")
images_not_uploaded = []
images_to_upload = []
duplicate_images_filenames = []
path_to_url = {}
project, project_folder = get_project_and_folder_metadata(project)
upload_state = common.upload_state_int_to_str(project.get("upload_state"))
if upload_state == "External":
raise SABaseException(
0,
"The function does not support projects containing images attached with URLs"
)
finish_event = threading.Event()
tqdm_thread = threading.Thread(
target=_tqdm_download,
args=(
len(img_urls), images_to_upload, images_not_uploaded,
duplicate_images_filenames, finish_event
),
daemon=True
)
logger.info('Downloading %s images', len(img_urls))
tqdm_thread.start()
with tempfile.TemporaryDirectory() as save_dir_name:
save_dir = Path(save_dir_name)
for i, img_url in enumerate(img_urls):
try:
response = requests.get(img_url)
response.raise_for_status()
except Exception as e:
logger.warning(
"Couldn't download image %s, %s", img_url, str(e)
)
images_not_uploaded.append(img_url)
else:
if not img_names:
if response.headers.get('Content-Disposition') is not None:
img_path = save_dir / cgi.parse_header(
response.headers['Content-Disposition']
)[1]['filename']
else:
img_path = save_dir / basename(urlparse(img_url).path)
else:
img_path = save_dir / img_names[i]
if str(img_path) in path_to_url.keys():
duplicate_images_filenames.append(basename(img_path))
continue
with open(img_path, 'wb') as f:
f.write(response.content)
path_to_url[str(img_path)] = img_url
images_to_upload.append(img_path)
finish_event.set()
tqdm_thread.join()
images_uploaded_paths, images_not_uploaded_paths, duplicate_images_paths = upload_images_to_project(
(project, project_folder),
images_to_upload,
annotation_status=annotation_status,
image_quality_in_editor=image_quality_in_editor
)
images_not_uploaded.extend(
[path_to_url[str(path)] for path in images_not_uploaded_paths]
)
images_uploaded = [
path_to_url[str(path)] for path in images_uploaded_paths
]
images_uploaded_filenames = [
basename(path) for path in images_uploaded_paths
]
duplicate_images_filenames.extend(
[basename(path) for path in duplicate_images_paths]
)
return (
images_uploaded, images_uploaded_filenames, duplicate_images_filenames,
images_not_uploaded
)
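# Usage sketch for upload_images_from_public_urls_to_project (the URL and
# image name below are placeholders):
def _example_upload_public_urls():
    return upload_images_from_public_urls_to_project(
        "Example Project",
        ["https://example.com/a.jpg"],
        img_names=["a.jpg"],
        annotation_status="NotStarted"
    )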
@Trackable
def upload_images_from_google_cloud_to_project(
project,
google_project,
bucket_name,
folder_path,
annotation_status='NotStarted',
image_quality_in_editor=None
):
"""Uploads all images present in folder_path at bucket_name in google_project to the project.
Sets status of all the uploaded images to set_status if it is not None.
:param project: project name or folder path (e.g., "project1/folder1")
:type project: str
:param google_project: the project name on google cloud, where the bucket resides
:type google_project: str
:param bucket_name: the name of the bucket where the images are stored
:type bucket_name: str
:param folder_path: path of the folder on the bucket where the images are stored
:type folder_path: str
:param annotation_status: value to set the annotation statuses of the uploaded images NotStarted InProgress QualityCheck Returned Completed Skipped
:type annotation_status: str
:param image_quality_in_editor: image quality be seen in SuperAnnotate web annotation editor.
Can be either "compressed" or "original". If None then the default value in project settings will be used.
:type image_quality_in_editor: str
:return: uploaded images' urls, uploaded images' filenames, duplicate images' filenames and not-uploaded images' urls
:rtype: tuple of list of strs
"""
images_not_uploaded = []
images_to_upload = []
duplicate_images_filenames = []
path_to_url = {}
project, project_folder = get_project_and_folder_metadata(project)
upload_state = common.upload_state_int_to_str(project.get("upload_state"))
if upload_state == "External":
raise SABaseException(
0,
"The function does not support projects containing images attached with URLs"
)
cloud_client = storage.Client(project=google_project)
bucket = cloud_client.get_bucket(bucket_name)
image_blobs = bucket.list_blobs(prefix=folder_path)
with tempfile.TemporaryDirectory() as save_dir_name:
save_dir = Path(save_dir_name)
for image_blob in image_blobs:
if image_blob.content_type.split('/')[0] != 'image':
continue
image_name = basename(image_blob.name)
image_save_pth = save_dir / image_name
if image_save_pth in path_to_url.keys():
duplicate_images_filenames.append(basename(image_save_pth))
continue
try:
image_blob.download_to_filename(image_save_pth)
except Exception as e:
logger.warning(
"Couldn't download image %s, %s", image_blob.name, str(e)
)
images_not_uploaded.append(image_blob.name)
else:
path_to_url[str(image_save_pth)] = image_blob.name
images_to_upload.append(image_save_pth)
images_uploaded_paths, images_not_uploaded_paths, duplicate_images_paths = upload_images_to_project(
(project, project_folder),
images_to_upload,
annotation_status=annotation_status,
image_quality_in_editor=image_quality_in_editor
)
images_not_uploaded.extend(
[path_to_url[str(path)] for path in images_not_uploaded_paths]
)
images_uploaded = [
path_to_url[str(path)] for path in images_uploaded_paths
]
images_uploaded_filenames = [
basename(path) for path in images_uploaded_paths
]
duplicate_images_filenames.extend(
[basename(path) for path in duplicate_images_paths]
)
return (
images_uploaded, images_uploaded_filenames, duplicate_images_filenames,
images_not_uploaded
)
@Trackable
def upload_images_from_azure_blob_to_project(
project,
container_name,
folder_path,
annotation_status='NotStarted',
image_quality_in_editor=None
):
"""Uploads all images present in folder_path at container_name Azure blob storage to the project.
Sets status of all the uploaded images to set_status if it is not None.
:param project: project name or folder path (e.g., "project1/folder1")
:type project: str
:param container_name: container name of the Azure blob storage
:type container_name: str
:param folder_path: path of the folder on the bucket where the images are stored
:type folder_path: str
:param annotation_status: value to set the annotation statuses of the uploaded images NotStarted InProgress QualityCheck Returned Completed Skipped
:type annotation_status: str
:param image_quality_in_editor: image quality be seen in SuperAnnotate web annotation editor.
Can be either "compressed" or "original". If None then the default value in project settings will be used.
:type image_quality_in_editor: str
:return: uploaded images' urls, uploaded images' filenames, duplicate images' filenames and not-uploaded images' urls
:rtype: tuple of list of strs
"""
images_not_uploaded = []
images_to_upload = []
duplicate_images_filenames = []
path_to_url = {}
project, project_folder = get_project_and_folder_metadata(project)
upload_state = common.upload_state_int_to_str(project.get("upload_state"))
if upload_state == "External":
raise SABaseException(
0,
"The function does not support projects containing images attached with URLs"
)
connect_key = os.getenv('AZURE_STORAGE_CONNECTION_STRING')
blob_service_client = BlobServiceClient.from_connection_string(connect_key)
container_client = blob_service_client.get_container_client(container_name)
image_blobs = container_client.list_blobs(name_starts_with=folder_path)
with tempfile.TemporaryDirectory() as save_dir_name:
save_dir = Path(save_dir_name)
for image_blob in image_blobs:
content_type = image_blob.content_settings.get('content_type')
if content_type is None:
logger.warning(
"Couldn't download image %s, content type could not be verified",
image_blob.name
)
continue
if content_type.split('/')[0] != 'image':
continue
image_name = basename(image_blob.name)
image_save_pth = save_dir / image_name
if image_save_pth in path_to_url.keys():
duplicate_images_filenames.append(basename(image_save_pth))
continue
try:
image_blob_client = blob_service_client.get_blob_client(
container=container_name, blob=image_blob
)
image_stream = image_blob_client.download_blob()
except Exception as e:
logger.warning(
"Couldn't download image %s, %s", image_blob.name, str(e)
)
images_not_uploaded.append(image_blob.name)
else:
with open(image_save_pth, 'wb') as image_file:
image_file.write(image_stream.readall())
path_to_url[str(image_save_pth)] = image_blob.name
images_to_upload.append(image_save_pth)
images_uploaded_paths, images_not_uploaded_paths, duplicate_images_paths = upload_images_to_project(
(project, project_folder),
images_to_upload,
annotation_status=annotation_status,
image_quality_in_editor=image_quality_in_editor
)
images_not_uploaded.extend(
[path_to_url[str(path)] for path in images_not_uploaded_paths]
)
images_uploaded = [
path_to_url[str(path)] for path in images_uploaded_paths
]
images_uploaded_filenames = [
basename(path) for path in images_uploaded_paths
]
duplicate_images_filenames.extend(
[basename(path) for path in duplicate_images_paths]
)
return (
images_uploaded, images_uploaded_filenames, duplicate_images_filenames,
images_not_uploaded
)
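# Usage sketch for upload_images_from_azure_blob_to_project: requires
# AZURE_STORAGE_CONNECTION_STRING in the environment; the container and
# folder names below are placeholders.
def _example_upload_from_azure():
    return upload_images_from_azure_blob_to_project(
        "Example Project", "example-container", "images/batch1"
    )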
def __upload_annotations_thread(
team_id, project_id, project_type, anns_filenames, folder_path,
annotation_classes_dict, pre, thread_id, chunksize, missing_images,
couldnt_upload, uploaded, from_s3_bucket, project_folder_id
):
NUM_TO_SEND = 500
len_anns = len(anns_filenames)
start_index = thread_id * chunksize
if start_index >= len_anns:
return
end_index = min(start_index + chunksize, len_anns)
postfix_json = '___objects.json' if project_type == "Vector" else '___pixel.json'
len_postfix_json = len(postfix_json)
postfix_mask = '___save.png'
if from_s3_bucket is not None:
from_session = boto3.Session()
from_s3 = from_session.resource('s3')
for i in range(start_index, end_index, NUM_TO_SEND):
names = []
for j in range(i, i + NUM_TO_SEND):
if j >= end_index:
break
image_name = anns_filenames[j][:-len_postfix_json]
names.append(image_name)
try:
metadatas = get_image_metadata(
({
"id": project_id
}, {
"id": project_folder_id
}), names, False
)
except SABaseException:
metadatas = []
names_in_metadatas = [metadata["name"] for metadata in metadatas]
id_to_name = {
metadata["id"]: metadata["name"]
for metadata in metadatas
}
if len(metadatas) < len(names):
for name in names:
if name not in names_in_metadatas:
ann_path = Path(folder_path) / (name + postfix_json)
missing_images[thread_id].append(ann_path)
logger.warning(
"Couldn't find image %s for annotation upload", ann_path
)
data = {
"project_id": project_id,
"team_id": team_id,
"ids": [metadata["id"] for metadata in metadatas],
"folder_id": project_folder_id
}
endpoint = '/images/getAnnotationsPathsAndTokens' if pre == "" else '/images/getPreAnnotationsPathsAndTokens'
response = _api.send_request(
req_type='POST', path=endpoint, json_req=data
)
if not response.ok:
logger.warning(
"Couldn't get token upload annotations %s", response.text
)
continue
res = response.json()
aws_creds = res["creds"]
s3_session = _get_boto_session_by_credentials(aws_creds)
s3_resource = s3_session.resource('s3')
bucket = s3_resource.Bucket(aws_creds["bucket"])
for image_id, image_info in res['images'].items():
image_name = id_to_name[int(image_id)]
json_filename = image_name + postfix_json
if from_s3_bucket is None:
full_path = Path(folder_path) / json_filename
annotation_json = json.load(open(full_path))
else:
file = io.BytesIO()
full_path = folder_path + json_filename
from_s3_object = from_s3.Object(from_s3_bucket, full_path)
from_s3_object.download_fileobj(file)
file.seek(0)
annotation_json = json.load(file)
if not check_annotation_json(annotation_json):
couldnt_upload[thread_id].append(full_path)
logger.warning(
"Annotation JSON %s missing width or height info. Skipping upload",
full_path
)
continue
fill_class_and_attribute_ids(
annotation_json, annotation_classes_dict
)
bucket.put_object(
Key=image_info["annotation_json_path"],
Body=json.dumps(annotation_json)
)
if project_type == "Pixel":
mask_filename = image_name + postfix_mask
if from_s3_bucket is None:
with open(Path(folder_path) / mask_filename, 'rb') as fin:
file = io.BytesIO(fin.read())
else:
file = io.BytesIO()
from_s3_object = from_s3.Object(
from_s3_bucket, folder_path + mask_filename
)
from_s3_object.download_fileobj(file)
file.seek(0)
bucket.put_object(
Key=image_info["annotation_bluemap_path"], Body=file
)
uploaded[thread_id].append(full_path)
@Trackable
def upload_annotations_from_folder_to_project(
project, folder_path, from_s3_bucket=None, recursive_subfolders=False
):
"""Finds and uploads all JSON files in the folder_path as annotations to the project.
The JSON files should follow a specific naming convention. For Vector
projects they should be named "<image_filename>___objects.json" (e.g., if
image is cats.jpg the annotation filename should be cats.jpg___objects.json), for Pixel projects
the JSON file should be named "<image_filename>___pixel.json" and also a second mask
image file should be present with the name "<image_name>___save.png". In both cases
an image with <image_name> should already be present on the platform.
Existing annotations will be overwritten.
:param project: project name or folder path (e.g., "project1/folder1")
:type project: str
:param folder_path: from which folder to upload the annotations
:type folder_path: Pathlike (str or Path)
:param from_s3_bucket: AWS S3 bucket to use. If None then folder_path is in local filesystem
:type from_s3_bucket: str
:param recursive_subfolders: enable recursive subfolder parsing
:type recursive_subfolders: bool
:return: paths to annotations uploaded, could-not-upload, missing-images
:rtype: tuple of list of strs
"""
get_project_and_folder_metadata(project)
return _upload_pre_or_annotations_from_folder_to_project(
project, folder_path, "", from_s3_bucket, recursive_subfolders
)
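# Hedged usage sketch (not part of the original source); the project path and folders
# below are hypothetical placeholders.
#
# uploaded, could_not_upload, missing_images = upload_annotations_from_folder_to_project(
#     "Example Project/batch1", "./annotations/"
# )
# # or, reading the JSON files from an S3 bucket instead of the local filesystem:
# # uploaded, could_not_upload, missing_images = upload_annotations_from_folder_to_project(
# #     "Example Project/batch1", "annotations/", from_s3_bucket="my-bucket",
# #     recursive_subfolders=True
# # )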
def _upload_pre_or_annotations_from_folder_to_project(
project, folder_path, pre, from_s3_bucket=None, recursive_subfolders=False
):
if recursive_subfolders:
logger.info(
"When using recursive subfolder parsing same name %sannotations in different subfolders will overwrite each other.",
pre
)
logger.info(
"The JSON files should follow specific naming convention. For Vector projects they should be named '<image_name>___objects.json', for Pixel projects JSON file should be names '<image_name>___pixel.json' and also second mask image file should be present with the name '<image_name>___save.png'. In both cases image with <image_name> should be already present on the platform."
)
logger.info("Existing %sannotations will be overwritten.", pre)
project, project_folder = get_project_and_folder_metadata(project)
project = get_project_metadata(project['name'])
if project_folder is not None:
project_folder_id = project_folder["id"]
else:
project_folder_id = None
return _upload_annotations_from_folder_to_project(
project, folder_path, pre, from_s3_bucket, recursive_subfolders,
project_folder_id
)
def _upload_annotations_from_folder_to_project(
project,
folder_path,
pre,
from_s3_bucket=None,
recursive_subfolders=False,
project_folder_id=None
):
return_result = []
if from_s3_bucket is not None:
if not folder_path.endswith('/'):
folder_path = folder_path + '/'
if recursive_subfolders:
if from_s3_bucket is None:
for path in Path(folder_path).glob('*'):
if path.is_dir():
return_result += _upload_annotations_from_folder_to_project(
project, path, pre, from_s3_bucket,
recursive_subfolders, project_folder_id
)
else:
s3_client = boto3.client('s3')
result = s3_client.list_objects(
Bucket=from_s3_bucket, Prefix=folder_path, Delimiter='/'
)
results = result.get('CommonPrefixes')
if results is not None:
for o in results:
return_result += _upload_annotations_from_folder_to_project(
project, o.get('Prefix'), pre, from_s3_bucket,
recursive_subfolders, project_folder_id
)
team_id, project_id, project_type = project["team_id"], project[
"id"], project["type"]
logger.info(
"Uploading all annotations from %s to project %s.", folder_path,
project["name"]
)
annotations_paths = []
annotations_filenames = []
if from_s3_bucket is None:
for path in Path(folder_path).glob('*.json'):
if path.name.endswith('___objects.json') or path.name.endswith('___pixel.json'):
annotations_paths.append(path)
annotations_filenames.append(path.name)
else:
s3_client = boto3.client('s3')
paginator = s3_client.get_paginator('list_objects_v2')
response_iterator = paginator.paginate(
Bucket=from_s3_bucket, Prefix=folder_path
)
for response in response_iterator:
for object_data in response['Contents']:
key = object_data['Key']
if '/' in key[len(folder_path) + 1:]:
continue
if key.endswith('___objects.json') or key.endswith('___pixel.json'):
annotations_paths.append(key)
annotations_filenames.append(Path(key).name)
len_annotations_paths = len(annotations_paths)
logger.info(
"Uploading %s annotations to project %s.", len_annotations_paths,
project["name"]
)
if len_annotations_paths == 0:
return return_result
uploaded = []
for _ in range(_NUM_THREADS):
uploaded.append([])
couldnt_upload = []
for _ in range(_NUM_THREADS):
couldnt_upload.append([])
missing_image = []
for _ in range(_NUM_THREADS):
missing_image.append([])
finish_event = threading.Event()
tqdm_thread = threading.Thread(
target=__tqdm_thread_upload_annotations,
args=(
len_annotations_paths, uploaded, couldnt_upload, missing_image,
finish_event
),
daemon=True
)
tqdm_thread.start()
annotation_classes = search_annotation_classes(project)
annotation_classes_dict = get_annotation_classes_name_to_id(
annotation_classes
)
chunksize = int(math.ceil(len_annotations_paths / _NUM_THREADS))
threads = []
for thread_id in range(_NUM_THREADS):
t = threading.Thread(
target=__upload_annotations_thread,
args=(
team_id, project_id, project_type, annotations_filenames,
folder_path, annotation_classes_dict, pre, thread_id, chunksize,
missing_image, couldnt_upload, uploaded, from_s3_bucket,
project_folder_id
),
daemon=True
)
threads.append(t)
t.start()
for t in threads:
t.join()
finish_event.set()
tqdm_thread.join()
list_of_not_uploaded = []
for couldnt_upload_thread in couldnt_upload:
for file in couldnt_upload_thread:
list_of_not_uploaded.append(str(file))
list_of_uploaded = []
for upload_thread in uploaded:
for file in upload_thread:
list_of_uploaded.append(str(file))
list_of_missing_images = []
for missing_thread in missing_image:
for file in missing_thread:
list_of_missing_images.append(str(file))
return (list_of_uploaded, list_of_not_uploaded, list_of_missing_images)
def __tqdm_thread_upload_annotations(
total_num, uploaded, couldnt_upload, missing_image, finish_event
):
with tqdm(total=total_num) as pbar:
while True:
finished = finish_event.wait(_TIME_TO_UPDATE_IN_TQDM)
if not finished:
sum_all = 0
for i in couldnt_upload:
sum_all += len(i)
for i in uploaded:
sum_all += len(i)
for i in missing_image:
sum_all += len(i)
pbar.update(sum_all - pbar.n)
else:
pbar.update(total_num - pbar.n)
break
@Trackable
def upload_preannotations_from_folder_to_project(
project, folder_path, from_s3_bucket=None, recursive_subfolders=False
):
"""Finds and uploads all JSON files in the folder_path as pre-annotations to the project.
The JSON files should follow a specific naming convention. For Vector
projects they should be named "<image_filename>___objects.json" (e.g., if
image is cats.jpg the annotation filename should be cats.jpg___objects.json), for Pixel projects
the JSON file should be named "<image_filename>___pixel.json" and also a second mask
image file should be present with the name "<image_name>___save.png". In both cases
an image with <image_name> should already be present on the platform.
Existing pre-annotations will be overwritten.
:param project: project name or folder path (e.g., "project1/folder1")
:type project: str
:param folder_path: from which folder to upload the pre-annotations
:type folder_path: Pathlike (str or Path)
:param from_s3_bucket: AWS S3 bucket to use. If None then folder_path is in local filesystem
:type from_s3_bucket: str
:param recursive_subfolders: enable recursive subfolder parsing
:type recursive_subfolders: bool
:return: paths to pre-annotations uploaded and could-not-upload
:rtype: tuple of list of strs
"""
get_project_and_folder_metadata(project)
return _upload_pre_or_annotations_from_folder_to_project(
project, folder_path, "pre", from_s3_bucket, recursive_subfolders
)
@Trackable
def share_project(project, user, user_role):
"""Share project with user.
:param project: project name
:type project: str
:param user: user email or metadata of the user to share project with
:type user: str or dict
:param user_role: user role to apply, one of Admin, Annotator, QA, Customer, Viewer
:type user_role: str
"""
if not isinstance(project, dict):
project = get_project_metadata_bare(project)
if not isinstance(user, dict):
user = get_team_contributor_metadata(user)
user_role = common.user_role_str_to_int(user_role)
team_id, project_id = project["team_id"], project["id"]
user_id = user["id"]
json_req = {"user_id": user_id, "user_role": user_role}
params = {'team_id': team_id}
response = _api.send_request(
req_type='POST',
path=f'/project/{project_id}/share',
params=params,
json_req=json_req
)
if not response.ok:
raise SABaseException(response.status_code, response.text)
logger.info(
"Shared project %s with user %s and role %s", project["name"],
user["email"], common.user_role_int_to_str(user_role)
)
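# Hedged usage sketch (hypothetical project name and email); the user argument may also be
# a metadata dict returned by get_team_contributor_metadata.
#
# share_project("Example Project", "annotator@example.com", "Annotator")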
@Trackable
def unshare_project(project, user):
"""Unshare (remove) user from project.
:param project: project name
:type project: str
:param user: user email or metadata of the user to unshare project
:type user: str or dict
"""
if not isinstance(project, dict):
project = get_project_metadata_bare(project)
if not isinstance(user, dict):
user = get_team_contributor_metadata(user)
team_id, project_id = project["team_id"], project["id"]
user_id = user["id"]
json_req = {"user_id": user_id}
params = {'team_id': team_id}
response = _api.send_request(
req_type='DELETE',
path=f'/project/{project_id}/share',
params=params,
json_req=json_req
)
if not response.ok:
raise SABaseException(response.status_code, response.text)
logger.info("Unshared project %s from user ID %s", project["name"], user_id)
@Trackable
def upload_images_from_s3_bucket_to_project(
project,
accessKeyId,
secretAccessKey,
bucket_name,
folder_path,
image_quality_in_editor=None
):
"""Uploads all images from AWS S3 bucket to the project.
:param project: project name or folder path (e.g., "project1/folder1")
:type project: str
:param accessKeyId: AWS S3 access key ID
:type accessKeyId: str
:param secretAccessKey: AWS S3 secret access key
:type secretAccessKey: str
:param bucket_name: AWS S3 bucket
:type bucket_name: str
:param folder_path: from which folder to upload the images
:type folder_path: str
:param image_quality_in_editor: image quality to be seen in the SuperAnnotate web annotation editor.
Can be either "compressed" or "original". If None then the default value in project settings will be used.
:type image_quality_in_editor: str
"""
project, project_folder = get_project_and_folder_metadata(project)
if image_quality_in_editor is not None:
old_quality = get_project_default_image_quality_in_editor(project)
set_project_default_image_quality_in_editor(
project, image_quality_in_editor
)
team_id, project_id = project["team_id"], project["id"]
params = {
"team_id": team_id,
}
data = {
"accessKeyID": accessKeyId,
"secretAccessKey": secretAccessKey,
"bucketName": bucket_name,
"folderName": folder_path
}
if project_folder is not None:
data["folder_id"] = project_folder["id"]
response = _api.send_request(
req_type='POST',
path=f'/project/{project_id}/get-image-s3-access-point',
params=params,
json_req=data
)
if not response.ok:
raise SABaseException(
response.status_code,
"Couldn't upload to project from S3 " + response.text
)
logger.info("Waiting for S3 upload to finish.")
while True:
time.sleep(5)
res = _get_upload_from_s3_bucket_to_project_status(
project, project_folder
)
if res["progress"] == '2':
break
if res["progress"] != "1":
raise SABaseException(
response.status_code,
"Couldn't upload to project from S3 " + str(res)
)
if image_quality_in_editor is not None:
set_project_default_image_quality_in_editor(project, old_quality)
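# Hedged usage sketch; the credentials, bucket and folder below are placeholders.
#
# upload_images_from_s3_bucket_to_project(
#     "Example Project/folder1", "<ACCESS_KEY_ID>", "<SECRET_ACCESS_KEY>",
#     "my-bucket", "images/batch1", image_quality_in_editor="original"
# )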
def _get_upload_from_s3_bucket_to_project_status(project, project_folder):
team_id, project_id = project["team_id"], project["id"]
params = {
"team_id": team_id,
}
if project_folder is not None:
params["folder_id"] = project_folder["id"]
response = _api.send_request(
req_type='GET',
path=f'/project/{project_id}/getS3UploadStatus',
params=params
)
if not response.ok:
raise SABaseException(
response.status_code,
"Couldn't get upload to project from S3 status " + response.text
)
return response.json()
@Trackable
def get_project_workflow(project):
"""Gets project's workflow.
Return value example: [{ "step" : <step_num>, "className" : <annotation_class>, "tool" : <tool_num>, ...},...]
:param project: project name or metadata
:type project: str or dict
:return: project workflow
:rtype: list of dicts
"""
if not isinstance(project, dict):
project = get_project_metadata_bare(project)
team_id, project_id = project["team_id"], project["id"]
params = {
"team_id": team_id,
}
response = _api.send_request(
req_type='GET', path=f'/project/{project_id}/workflow', params=params
)
if not response.ok:
raise SABaseException(
response.status_code,
"Couldn't get project workflow " + response.text
)
res = response.json()
annotation_classes = search_annotation_classes(project)
for r in res:
if "class_id" not in r:
continue
found_classid = False
for a_class in annotation_classes:
if a_class["id"] == r["class_id"]:
found_classid = True
r["className"] = a_class["name"]
del r["class_id"]
break
if not found_classid:
raise SABaseException(0, "Couldn't find class_id in workflow")
return res
@Trackable
def set_project_workflow(project, new_workflow):
"""Sets project's workflow.
new_workflow example: [{ "step" : <step_num>, "className" : <annotation_class>, "tool" : <tool_num>,
"attribute":[{"attribute" : {"name" : <attribute_value>, "attribute_group" : {"name": <attribute_group>}}},
...]
},...]
:param project: project name or metadata
:type project: str or dict
:param new_workflow: new workflow list of dicts
:type new_workflow: list of dicts
"""
if not isinstance(project, dict):
project = get_project_metadata_bare(project)
if not isinstance(new_workflow, list):
raise SABaseException(
0, "Set project setting new_workflow should be a list"
)
team_id, project_id = project["team_id"], project["id"]
params = {
"team_id": team_id,
}
annotation_classes = search_annotation_classes(project)
new_list = copy.deepcopy(new_workflow)
for step in new_list:
if "id" in step:
del step["id"]
if "className" not in step:
continue
for an_class in annotation_classes:
if an_class["name"] == step["className"]:
step["class_id"] = an_class["id"]
break
else:
raise SABaseException(
0, "Annotation class not found in set_project_workflow."
)
json_req = {"steps": [step]}
response = _api.send_request(
req_type='POST',
path=f'/project/{project_id}/workflow',
params=params,
json_req=json_req
)
if not response.ok:
raise SABaseException(
response.status_code,
"Couldn't set project workflow " + response.text
)
if "attribute" not in step:
continue
current_steps = get_project_workflow(project)
for step_in_response in current_steps:
if step_in_response["step"] == step["step"]:
workflow_id = step_in_response["id"]
break
else:
raise SABaseException(0, "Couldn't find step in workflow")
request_data = []
for attribute in step["attribute"]:
for att_class in an_class["attribute_groups"]:
if att_class["name"] == attribute["attribute"]["attribute_group"
]["name"]:
break
else:
raise SABaseException(
0, "Attribute group not found in set_project_workflow."
)
for att_value in att_class["attributes"]:
if att_value["name"] == attribute["attribute"]["name"]:
attribute_id = att_value["id"]
break
else:
raise SABaseException(
0, "Attribute value not found in set_project_workflow."
)
request_data.append(
{
"workflow_id": workflow_id,
"attribute_id": attribute_id
}
)
response = _api.send_request(
req_type='POST',
path=f'/project/{project_id}/workflow_attribute',
params=params,
json_req={"data": request_data}
)
if not response.ok:
raise SABaseException(
response.status_code,
"Couldn't set project workflow " + response.text
)
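# Hedged usage sketch: a minimal new_workflow following the format documented above.
# The class, attribute group and attribute names are hypothetical and must already
# exist in the project's annotation classes.
#
# set_project_workflow(
#     "Example Project",
#     [{
#         "step": 1,
#         "className": "Vehicle",
#         "tool": 3,
#         "attribute": [
#             {"attribute": {"name": "Car", "attribute_group": {"name": "Type"}}}
#         ]
#     }]
# )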
@Trackable
def get_project_settings(project):
"""Gets project's settings.
Return value example: [{ "attribute" : "Brightness", "value" : 10, ...},...]
:param project: project name or metadata
:type project: str or dict
:return: project settings
:rtype: list of dicts
"""
if not isinstance(project, dict):
project = get_project_metadata_bare(project)
team_id, project_id = project["team_id"], project["id"]
params = {
"team_id": team_id,
}
response = _api.send_request(
req_type='GET', path=f'/project/{project_id}/settings', params=params
)
if not response.ok:
raise SABaseException(
response.status_code,
"Couldn't get project settings " + response.text
)
res = response.json()
for val in res:
if val['attribute'] == 'ImageQuality':
if val['value'] == 60:
val['value'] = 'compressed'
elif val['value'] == 100:
val['value'] = 'original'
else:
raise SABaseException(0, "NA ImageQuality value")
return res
@Trackable
def set_project_settings(project, new_settings):
"""Sets project's settings.
New settings format example: [{ "attribute" : "Brightness", "value" : 10, ...},...]
:param project: project name or metadata
:type project: str or dict
:param new_settings: new settings list of dicts
:type new_settings: list of dicts
:return: updated part of project's settings
:rtype: list of dicts
"""
if not isinstance(project, dict):
project = get_project_metadata_bare(project)
if not isinstance(new_settings, list):
raise SABaseException(
0, "Set project setting new_settings should be a list"
)
team_id, project_id = project["team_id"], project["id"]
params = {
"team_id": team_id,
}
current_settings = get_project_settings(project)
id_conv = {}
for setting in current_settings:
if "attribute" in setting:
id_conv[setting["attribute"]] = setting["id"]
new_list = []
for new_setting in new_settings:
if "attribute" in new_setting and new_setting["attribute"] in id_conv:
new_list.append(
{
"attribute": new_setting["attribute"],
"id": id_conv[new_setting["attribute"]],
"value": new_setting["value"]
}
)
for val in new_list:
if val['attribute'] == 'ImageQuality':
if val['value'] == 'compressed':
val['value'] = 60
elif val['value'] == 'original':
val['value'] = 100
else:
raise SABaseException(0, "NA ImageQuality value")
json_req = {"settings": new_list}
response = _api.send_request(
req_type='PUT',
path=f'/project/{project_id}/settings',
params=params,
json_req=json_req
)
if not response.ok:
raise SABaseException(
response.status_code,
"Couldn't set project settings " + response.text
)
return response.json()
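# Hedged usage sketch: only attributes already present in the project's settings are
# updated (see the id_conv mapping above).
#
# set_project_settings(
#     "Example Project", [{"attribute": "ImageQuality", "value": "compressed"}]
# )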
@Trackable
def set_project_default_image_quality_in_editor(
project, image_quality_in_editor
):
"""Sets project's default image quality in editor setting.
:param project: project name or metadata
:type project: str or dict
:param image_quality_in_editor: new setting value, should be "original" or "compressed"
:type image_quality_in_editor: str
"""
if not isinstance(project, dict):
project = get_project_metadata_bare(project)
set_project_settings(
project,
[{
"attribute": "ImageQuality",
"value": image_quality_in_editor
}]
)
@Trackable
def get_project_default_image_quality_in_editor(project):
"""Gets project's default image quality in editor setting.
:param project: project name or metadata
:type project: str or dict
:return: "original" or "compressed" setting value
:rtype: str
"""
for setting in get_project_settings(project):
if "attribute" in setting and setting["attribute"] == "ImageQuality":
return setting["value"]
raise SABaseException(
0,
"Image quality in editor should be 'compressed', 'original' or None for project settings value"
)
@Trackable
def get_project_metadata(
project,
include_annotation_classes=False,
include_settings=False,
include_workflow=False,
include_contributors=False,
include_complete_image_count=False
):
"""Returns project metadata
:param project: project name
:type project: str
:param include_annotation_classes: enables project annotation classes output under
the key "annotation_classes"
:type include_annotation_classes: bool
:param include_settings: enables project settings output under
the key "settings"
:type include_settings: bool
:param include_workflow: enables project workflow output under
the key "workflow"
:type include_workflow: bool
:param include_contributors: enables project contributors output under
the key "contributors"
:type include_contributors: bool
:param include_complete_image_count: enables project completed images count output
:type include_complete_image_count: bool
:return: metadata of project
:rtype: dict
"""
if not isinstance(project, dict):
project = get_project_metadata_bare(
project, include_complete_image_count
)
result = copy.deepcopy(project)
if include_annotation_classes:
result["annotation_classes"] = search_annotation_classes(project)
if include_contributors:
result["contributors"] = get_project_metadata_with_users(project
)["users"]
if include_settings:
result["settings"] = get_project_settings(project)
if include_workflow:
result["workflow"] = get_project_workflow(project)
return result
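# Hedged usage sketch (hypothetical project name); each include_* flag adds the
# corresponding key to the returned dict.
#
# metadata = get_project_metadata(
#     "Example Project",
#     include_annotation_classes=True,
#     include_settings=True,
#     include_workflow=True,
#     include_contributors=True
# )
# # metadata["annotation_classes"], metadata["settings"], metadata["workflow"] and
# # metadata["contributors"] are then available alongside the bare project fields.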
@Trackable
def clone_project(
project_name,
from_project,
project_description=None,
copy_annotation_classes=True,
copy_settings=True,
copy_workflow=True,
copy_contributors=False
):
"""Create a new project in the team using annotation classes and settings from from_project.
:param project_name: new project's name
:type project_name: str
:param from_project: the name of the project being used for duplication
:type from_project: str
:param project_description: the new project's description. If None, from_project's
description will be used
:type project_description: str
:param copy_annotation_classes: enables copying annotation classes
:type copy_annotation_classes: bool
:param copy_settings: enables copying project settings
:type copy_settings: bool
:param copy_workflow: enables copying project workflow
:type copy_workflow: bool
:param copy_contributors: enables copying project contributors
:type copy_contributors: bool
:return: dict object metadata of the new project
:rtype: dict
"""
get_project_metadata_bare(from_project)
try:
get_project_metadata_bare(project_name)
except SANonExistingProjectNameException:
pass
else:
raise SAExistingProjectNameException(
0, "Project with name " + project_name +
" already exists. Please use unique names for projects to use with SDK."
)
metadata = get_project_metadata(
from_project, copy_annotation_classes, copy_settings, copy_workflow,
copy_contributors
)
metadata["name"] = project_name
if project_description is not None:
metadata["description"] = project_description
return create_project_from_metadata(metadata)
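# Hedged usage sketch (hypothetical project names):
#
# new_project = clone_project(
#     "Example Project v2",
#     "Example Project",
#     project_description="Copy of Example Project",
#     copy_contributors=True
# )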
@Trackable
def attach_video_urls_to_project(
project, attachments, annotation_status="NotStarted"
):
"""Link videos on external storage to SuperAnnotate.
:param project: project name or project folder path
:type project: str or dict
:param attachments: path to csv file on attachments metadata
:type attachments: Path-like (str or Path)
:param annotation_status: value to set the annotation statuses of the linked videos: NotStarted, InProgress, QualityCheck, Returned, Completed, Skipped
:type annotation_status: str
:return: attached videos, failed videos, skipped videos
:rtype: (list, list, list)
"""
return attach_file_urls_to_project(project, attachments, annotation_status)
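# Hedged sketch of the attachments CSV consumed by attach_video_urls_to_project /
# attach_file_urls_to_project: a "url" column is required and a "name" column is
# optional (missing names are replaced with generated UUIDs). The file name and URLs
# below are placeholders.
#
# attachments.csv:
#   name,url
#   clip_01,https://example.com/videos/clip_01.mp4
#   ,https://example.com/videos/clip_02.mp4
#
# attached, failed, skipped = attach_video_urls_to_project("Example Project", "attachments.csv")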
def attach_file_urls_to_project(project, attachments, annotation_status):
"""Link files on external storage to SuperAnnotate.
:param project: project name or project folder path
:type project: str or dict
:param attachments: path to csv file on attachments metadata
:type attachments: Path-like (str or Path)
:param annotation_status: value to set the annotation statuses of the linked files: NotStarted, InProgress, QualityCheck, Returned, Completed, Skipped
:type annotation_status: str
:return: attached files, failed files, skipped files
:rtype: (list, list, list)
"""
project, folder = get_project_and_folder_metadata(project)
folder_name = project["name"] + (f'/{folder["name"]}' if folder else "")
upload_state = common.upload_state_int_to_str(project.get("upload_state"))
if upload_state == "Basic":
raise SABaseException(
0,
"You cannot attach URLs in this type of project. Please attach it in an external storage project"
)
annotation_status = common.annotation_status_str_to_int(annotation_status)
team_id, project_id = project["team_id"], project["id"]
df = pd.read_csv(attachments, dtype=str)
df = df[~df["url"].isnull()]
if "name" in df.columns:
df["name"] = df["name"].fillna("").apply(
lambda cell: cell if str(cell).strip() else str(uuid.uuid4())
)
else:
df["name"] = [str(uuid.uuid4()) for _ in range(len(df.index))]
df = | pd.DataFrame(df, columns=["name", "url"]) | pandas.DataFrame |
import numpy as np
import pandas as pd
def create_date(start_date="2020-01-01", end_date="2020-12-31"):
"""Create a random date.
Parameters
----------
start_date : str, optional
The minimum possible date, by default '2020-01-01'. The format must be
yyyy-mm-dd.
end_date : str, optional
The maximum possible date, by default '2020-12-31'. The format must be
yyyy-mm-dd.
Returns
-------
pandas.Timestamp
A random date.
"""
start_date = pd.to_datetime([start_date])
end_date = | pd.to_datetime([end_date]) | pandas.to_datetime |
from typing import TYPE_CHECKING, List, Optional, Type, Union
import pandas as pd
from transformers.pipelines import Pipeline
from transformers.pipelines import pipeline as pipeline_factory
from transformers.pipelines.table_question_answering import (
TableQuestionAnsweringPipeline,
)
from ray.air._internal.checkpointing import load_preprocessor_from_dir
from ray.air.checkpoint import Checkpoint
from ray.air.constants import TENSOR_COLUMN_NAME
from ray.train.predictor import Predictor
if TYPE_CHECKING:
from ray.data.preprocessor import Preprocessor
class HuggingFacePredictor(Predictor):
"""A predictor for HuggingFace Transformers PyTorch models.
This predictor uses Transformers Pipelines for inference.
Args:
pipeline: The Transformers pipeline to use for inference.
preprocessor: A preprocessor used to transform data batches prior
to prediction.
"""
def __init__(
self,
pipeline: Optional[Pipeline] = None,
preprocessor: Optional["Preprocessor"] = None,
):
self.pipeline = pipeline
self.preprocessor = preprocessor
@classmethod
def from_checkpoint(
cls,
checkpoint: Checkpoint,
*,
pipeline: Optional[Type[Pipeline]] = None,
**pipeline_kwargs,
) -> "HuggingFacePredictor":
"""Instantiate the predictor from a Checkpoint.
The checkpoint is expected to be a result of ``HuggingFaceTrainer``.
Args:
checkpoint: The checkpoint to load the model and
preprocessor from. It is expected to be from the result of a
``HuggingFaceTrainer`` run.
pipeline: A ``transformers.pipelines.Pipeline`` class to use.
If not specified, will use the ``pipeline`` abstraction
wrapper.
**pipeline_kwargs: Any kwargs to pass to the pipeline
initialization. If ``pipeline`` is None, this must contain
the 'task' argument. Cannot contain 'model'.
"""
if not pipeline and "task" not in pipeline_kwargs:
raise ValueError(
"If `pipeline` is not specified, 'task' must be passed as a kwarg."
)
pipeline = pipeline or pipeline_factory
with checkpoint.as_directory() as checkpoint_path:
preprocessor = load_preprocessor_from_dir(checkpoint_path)
pipeline = pipeline(model=checkpoint_path, **pipeline_kwargs)
return HuggingFacePredictor(
pipeline=pipeline,
preprocessor=preprocessor,
)
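# Hedged usage sketch; assumes `checkpoint` is the result of a HuggingFaceTrainer run
# and that the checkpoint directory holds a causal language model.
#
# predictor = HuggingFacePredictor.from_checkpoint(checkpoint, task="text-generation")
# predictions = predictor.predict(pd.DataFrame(["Complete me"], columns=["sentences"]))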
def _predict(
self, data: Union[list, pd.DataFrame], **pipeline_call_kwargs
) -> pd.DataFrame:
ret = self.pipeline(data, **pipeline_call_kwargs)
# Remove unnecessary lists
try:
new_ret = [x[0] if isinstance(x, list) and len(x) == 1 else x for x in ret]
df = pd.DataFrame(new_ret)
except Exception:
# if we fail for any reason, just give up
df = pd.DataFrame(ret)
df.columns = [str(col) for col in df.columns]
return df
def _convert_data_for_pipeline(
self, data: pd.DataFrame
) -> Union[list, pd.DataFrame]:
"""Convert the data into a format accepted by the pipeline.
In most cases, this format is a list of strings."""
# Special case
if isinstance(self.pipeline, TableQuestionAnsweringPipeline):
return data
# Otherwise, a list of columns as lists
columns = [data[col].to_list() for col in data.columns]
# Flatten if it's only one column
if len(columns) == 1:
columns = columns[0]
return columns
def _predict_pandas(
self,
data: "pd.DataFrame",
feature_columns: Optional[List[str]] = None,
**pipeline_call_kwargs,
) -> "pd.DataFrame":
"""Run inference on data batch.
The data is converted into a list (unless ``pipeline`` is a
``TableQuestionAnsweringPipeline``) and passed to the ``pipeline``
object.
Args:
data: A batch of input data. Either a pandas DataFrame or numpy
array.
feature_columns: The names or indices of the columns in the
data to use as features to predict on. If None, use all
columns.
**pipeline_call_kwargs: additional kwargs to pass to the
``pipeline`` object.
Examples:
.. code-block:: python
import pandas as pd
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
from transformers.pipelines import pipeline
from ray.train.huggingface import HuggingFacePredictor
model_checkpoint = "gpt2"
tokenizer_checkpoint = "sgugger/gpt2-like-tokenizer"
tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint)
model_config = AutoConfig.from_pretrained(model_checkpoint)
model = AutoModelForCausalLM.from_config(model_config)
predictor = HuggingFacePredictor(
pipeline=pipeline(
task="text-generation", model=model, tokenizer=tokenizer
)
)
prompts = pd.DataFrame(
["Complete me", "And me", "Please complete"], columns=["sentences"]
)
predictions = predictor.predict(prompts)
Returns:
Prediction result.
"""
if TENSOR_COLUMN_NAME in data:
arr = data[TENSOR_COLUMN_NAME].to_numpy()
if feature_columns:
data = | pd.DataFrame(arr[:, feature_columns]) | pandas.DataFrame |
# fmt: off
import h5py
import os
import shutil
import copy
import h5py_cache
import pickle as pkl
import numpy as np
import pandas as pd
import ipywidgets as ipyw
from nd2reader import ND2Reader
from tifffile import imsave, imread
from .utils import pandas_hdf5_handler,writedir
from parse import compile
class hdf5_fov_extractor:
def __init__(self,nd2filename,headpath,tpts_per_file=100,ignore_fovmetadata=False,nd2reader_override={}): #note this chunk size has a large role in downstream steps...make sure it is less than 1 MB
self.nd2filename = nd2filename
self.headpath = headpath
self.metapath = self.headpath + "/metadata.hdf5"
self.hdf5path = self.headpath + "/hdf5"
self.tpts_per_file = tpts_per_file
self.ignore_fovmetadata = ignore_fovmetadata
self.nd2reader_override = nd2reader_override
self.organism = ''
self.microscope = ''
self.notes = ''
def writemetadata(self,t_range=None,fov_list=None):
ndmeta_handle = nd_metadata_handler(self.nd2filename,ignore_fovmetadata=self.ignore_fovmetadata,nd2reader_override=self.nd2reader_override)
if self.ignore_fovmetadata:
exp_metadata = ndmeta_handle.get_metadata()
else:
exp_metadata,fov_metadata = ndmeta_handle.get_metadata()
if t_range is not None:
exp_metadata["frames"] = exp_metadata["frames"][t_range[0]:t_range[1]+1]
exp_metadata["num_frames"] = len(exp_metadata["frames"])
fov_metadata = fov_metadata.loc[pd.IndexSlice[:,slice(t_range[0],t_range[1])],:] #4 -> 70
if fov_list is not None:
fov_metadata = fov_metadata.loc[list(fov_list)]
exp_metadata["fields_of_view"] = list(fov_list)
self.chunk_shape = (1,exp_metadata["height"],exp_metadata["width"])
chunk_bytes = (2*np.multiply.accumulate(np.array(self.chunk_shape))[-1])
self.chunk_cache_mem_size = 2*chunk_bytes
exp_metadata["chunk_shape"],exp_metadata["chunk_cache_mem_size"] = (self.chunk_shape,self.chunk_cache_mem_size)
exp_metadata["Organism"],exp_metadata["Microscope"],exp_metadata["Notes"] = (self.organism,self.microscope,self.notes)
self.meta_handle = pandas_hdf5_handler(self.metapath)
if self.ignore_fovmetadata:
assignment_metadata = self.assignidx(exp_metadata,metadf=None)
assignment_metadata.astype({"File Index":int,"Image Index":int})
else:
assignment_metadata = self.assignidx(exp_metadata,metadf=fov_metadata)
assignment_metadata.astype({"t":float,"x": float,"y":float,"z":float,"File Index":int,"Image Index":int})
self.meta_handle.write_df("global",assignment_metadata,metadata=exp_metadata)
def assignidx(self,expmeta,metadf=None):
if metadf is None:
numfovs = len(expmeta["fields_of_view"])
timepoints_per_fov = len(expmeta["frames"])
else:
numfovs = len(metadf.index.get_level_values(0).unique().tolist())
timepoints_per_fov = len(metadf.index.get_level_values(1).unique().tolist())
files_per_fov = (timepoints_per_fov//self.tpts_per_file) + 1
remainder = timepoints_per_fov%self.tpts_per_file
ttlfiles = numfovs*files_per_fov
fov_file_idx = np.repeat(list(range(files_per_fov)), self.tpts_per_file)[:-(self.tpts_per_file-remainder)]
file_idx = np.concatenate([fov_file_idx+(fov_idx*files_per_fov) for fov_idx in range(numfovs)])
fov_img_idx = np.repeat(np.array(list(range(self.tpts_per_file)))[np.newaxis,:],files_per_fov,axis=0)
fov_img_idx = fov_img_idx.flatten()[:-(self.tpts_per_file-remainder)]
img_idx = np.concatenate([fov_img_idx for fov_idx in range(numfovs)])
if metadf is None:
fov_idx = np.repeat(list(range(numfovs)), timepoints_per_fov)
timepoint_idx = np.repeat(np.array(list(range(timepoints_per_fov)))[np.newaxis,:],numfovs,axis=0).flatten()
data = {"fov" : fov_idx,"timepoints" : timepoint_idx,"File Index" : file_idx, "Image Index" : img_idx}
outdf = pd.DataFrame(data)
outdf = outdf.set_index(["fov","timepoints"], drop=True, append=False, inplace=False)
else:
outdf = copy.deepcopy(metadf)
outdf["File Index"] = file_idx
outdf["Image Index"] = img_idx
return outdf
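# Worked example (hedged, not from the original source): with tpts_per_file = 100 and
# 150 timepoints per fov, files_per_fov = 2 and remainder = 50, so each fov receives
# file indices [0]*100 + [1]*50 and image indices 0..99 followed by 0..49, and fov k
# is offset by k * files_per_fov in the "File Index" column.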
def read_metadata(self):
writedir(self.hdf5path,overwrite=True)
self.writemetadata()
metadf = self.meta_handle.read_df("global",read_metadata=True)
self.metadata = metadf.metadata
metadf = metadf.reset_index(inplace=False)
metadf = metadf.set_index(["File Index","Image Index"], drop=True, append=False, inplace=False)
self.metadf = metadf.sort_index()
def set_params(self,fov_list,t_range,organism,microscope,notes):
self.fov_list = fov_list
self.t_range = t_range
self.organism = organism
self.microscope = microscope
self.notes = notes
def inter_set_params(self):
self.read_metadata()
t0,tf = (self.metadata['frames'][0],self.metadata['frames'][-1])
available_fov_list = self.metadf["fov"].unique().tolist()
selection = ipyw.interactive(self.set_params, {"manual":True}, fov_list=ipyw.SelectMultiple(options=available_fov_list),\
t_range=ipyw.IntRangeSlider(value=[t0, tf],\
min=t0,max=tf,step=1,description='Time Range:',disabled=False), organism=ipyw.Textarea(value='',\
placeholder='Organism imaged in this experiment.',description='Organism:',disabled=False),\
microscope=ipyw.Textarea(value='',placeholder='Microscope used in this experiment.',\
description='Microscope:',disabled=False),notes=ipyw.Textarea(value='',\
placeholder='General experiment notes.',description='Notes:',disabled=False),)
display(selection)
def extract(self,dask_controller):
dask_controller.futures = {}
self.writemetadata(t_range=self.t_range,fov_list=self.fov_list)
metadf = self.meta_handle.read_df("global",read_metadata=True)
self.metadata = metadf.metadata
metadf = metadf.reset_index(inplace=False)
metadf = metadf.set_index(["File Index","Image Index"], drop=True, append=False, inplace=False)
self.metadf = metadf.sort_index()
def writehdf5(fovnum,num_entries,timepoint_list,file_idx,num_fovs):
with ND2Reader(self.nd2filename) as nd2file:
for key,item in self.nd2reader_override.items():
nd2file.metadata[key] = item
y_dim = self.metadata['height']
x_dim = self.metadata['width']
with h5py_cache.File(self.hdf5path + "/hdf5_" + str(file_idx) + ".hdf5","w",chunk_cache_mem_size=self.chunk_cache_mem_size) as h5pyfile:
for i,channel in enumerate(self.metadata["channels"]):
hdf5_dataset = h5pyfile.create_dataset(str(channel),\
(num_entries,y_dim,x_dim), chunks=self.chunk_shape, dtype='uint16')
for j in range(len(timepoint_list)):
frame = timepoint_list[j]
nd2_image = nd2file.get_frame_2D(c=i, t=frame, v=fovnum)
hdf5_dataset[j,:,:] = nd2_image
return "Done."
file_list = self.metadf.index.get_level_values("File Index").unique().values
num_jobs = len(file_list)
random_priorities = np.random.uniform(size=(num_jobs,))
for k,file_idx in enumerate(file_list):
priority = random_priorities[k]
filedf = self.metadf.loc[file_idx]
fovnum = filedf[0:1]["fov"].values[0]
num_entries = len(filedf.index.get_level_values("Image Index").values)
timepoint_list = filedf["timepoints"].tolist()
future = dask_controller.daskclient.submit(writehdf5,fovnum,num_entries,timepoint_list,file_idx,self.metadata["num_fovs"],retries=1,priority=priority)
dask_controller.futures["extract file: " + str(file_idx)] = future
extracted_futures = [dask_controller.futures["extract file: " + str(file_idx)] for file_idx in file_list]
pause_for_extract = dask_controller.daskclient.gather(extracted_futures,errors='skip')
futures_name_list = ["extract file: " + str(file_idx) for file_idx in file_list]
failed_files = [futures_name_list[k] for k,item in enumerate(extracted_futures) if item.status != "finished"]
failed_file_idx = [int(item.split(":")[1]) for item in failed_files]
outdf = self.meta_handle.read_df("global",read_metadata=False)
tempmeta = outdf.reset_index(inplace=False)
tempmeta = tempmeta.set_index(["File Index","Image Index"], drop=True, append=False, inplace=False)
failed_fovs = tempmeta.loc[failed_file_idx]["fov"].unique().tolist()
outdf = outdf.drop(failed_fovs)
if self.t_range is not None:
outdf = outdf.reset_index(inplace=False)
outdf["timepoints"] = outdf["timepoints"] - self.t_range[0]
outdf = outdf.set_index(["fov","timepoints"], drop=True, append=False, inplace=False)
self.meta_handle.write_df("global",outdf,metadata=self.metadata)
class nd_metadata_handler:
def __init__(self,nd2filename,ignore_fovmetadata=False,nd2reader_override={}):
self.nd2filename = nd2filename
self.ignore_fovmetadata = ignore_fovmetadata
self.nd2reader_override = nd2reader_override
def decode_unidict(self,unidict):
outdict = {}
for key, val in unidict.items():
if type(key) == bytes:
key = key.decode('utf8')
if type(val) == bytes:
val = val.decode('utf8')
outdict[key] = val
return outdict
def read_specsettings(self,SpecSettings):
spec_list = SpecSettings.decode('utf-8').split('\r\n')[1:]
spec_list = [item for item in spec_list if ":" in item]
spec_dict = {item.split(": ")[0].replace(" ", "_"):item.split(": ")[1].replace(" ", "_") for item in spec_list}
return spec_dict
def get_imaging_settings(self,nd2file):
raw_metadata = nd2file.parser._raw_metadata
imaging_settings = {}
for key,meta in raw_metadata.image_metadata_sequence[b'SLxPictureMetadata'][b'sPicturePlanes'][b'sSampleSetting'].items():
camera_settings = meta[b'pCameraSetting']
camera_name = camera_settings[b'CameraUserName'].decode('utf-8')
channel_name = camera_settings[b'Metadata'][b'Channels'][b'Channel_0'][b'Name'].decode('utf-8')
obj_settings = self.decode_unidict(meta[b'pObjectiveSetting'])
spec_settings = self.read_specsettings(meta[b'sSpecSettings'])
imaging_settings[channel_name] = {'camera_name':camera_name,'obj_settings':obj_settings,**spec_settings}
return imaging_settings
def make_fov_df(self,nd2file, exp_metadata): #only records values for single timepoints, does not separate between channels
img_metadata = nd2file.parser._raw_metadata
num_fovs = exp_metadata['num_fovs']
num_frames = exp_metadata['num_frames']
num_images_expected = num_fovs*num_frames
if img_metadata.x_data is not None:
x = np.reshape(img_metadata.x_data,(-1,num_fovs)).T
y = np.reshape(img_metadata.y_data,(-1,num_fovs)).T
z = np.reshape(img_metadata.z_data,(-1,num_fovs)).T
else:
positions = img_metadata.image_metadata[b'SLxExperiment'][b'ppNextLevelEx'][b''][b'uLoopPars'][b'Points'][b'']
x = []
y = []
z = []
for position in positions:
x.append([position[b'dPosX']]*num_frames)
y.append([position[b'dPosY']]*num_frames)
z.append([position[b'dPosZ']]*num_frames)
x = np.array(x)
y = np.array(y)
z = np.array(z)
time_points = x.shape[1]
acq_times = np.reshape(np.array(list(img_metadata.acquisition_times)[:num_images_expected]),(-1,num_fovs)).T
pos_label = np.repeat(np.expand_dims(np.add.accumulate(np.ones(num_fovs,dtype=int))-1,1),time_points,1) ##???
time_point_labels = np.repeat(np.expand_dims(np.add.accumulate(np.ones(time_points,dtype=int))-1,1),num_fovs,1).T
output = pd.DataFrame({'fov':pos_label.flatten(),'timepoints':time_point_labels.flatten(),'t':acq_times.flatten(),'x':x.flatten(),'y':y.flatten(),'z':z.flatten()})
output = output.astype({'fov': int, 'timepoints':int, 't': float, 'x': float,'y': float,'z': float})
output = output[~((output['x'] == 0.)&(output['y'] == 0.)&(output['z'] == 0.))].reset_index(drop=True) ## workaround for the case when only some FOVs are selected (revert if it causes problems in the future)
output = output.set_index(["fov","timepoints"], drop=True, append=False, inplace=False)
return output
def get_metadata(self):
# Manual numbers are for broken .nd2 files (from when Elements crashes)
nd2file = ND2Reader(self.nd2filename)
for key,item in self.nd2reader_override.items():
nd2file.metadata[key] = item
exp_metadata = copy.copy(nd2file.metadata)
wanted_keys = ['height', 'width', 'date', 'fields_of_view', 'frames', 'z_levels', 'z_coordinates', 'total_images_per_channel', 'channels', 'pixel_microns', 'num_frames', 'experiment']
exp_metadata = dict([(k, exp_metadata[k]) for k in wanted_keys if k in exp_metadata])
exp_metadata["num_fovs"] = len(exp_metadata['fields_of_view'])
exp_metadata["settings"] = self.get_imaging_settings(nd2file)
if not self.ignore_fovmetadata:
fov_metadata = self.make_fov_df(nd2file, exp_metadata)
nd2file.close()
return exp_metadata,fov_metadata
else:
nd2file.close()
return exp_metadata
class tiff_to_hdf5_extractor:
"""Utility to convert individual tiff files to hdf5 archives.
Attributes:
headpath (str): base directory for data analysis
tiffpath (str): directory where tiff files are located
metapath (str): metadata path
hdf5path (str): where to store hdf5 data
tpts_per_file (int): number of timepoints to put in each hdf5 file
format_string (str): format of filenames from which to extract metadata (using parse library)
"""
def __init__(self, headpath, tiffpath, format_string, tpts_per_file=100, manual_metadata_params={}):
self.tiffpath = tiffpath
self.headpath = headpath
self.metapath = self.headpath + "/metadata.hdf5"
self.hdf5path = self.headpath + "/hdf5"
self.tpts_per_file = tpts_per_file
self.format_string = format_string
self.manual_metadata_params = manual_metadata_params
def get_notes(self,organism,microscope,notes):
"""Get note metadata.
Inputs:
organism (str): organism
microscope (str): microscope
notes (str): notes
"""
self.organism = organism
self.microscope = microscope
self.notes = notes
def inter_get_notes(self):
"""Get notes interactively using ipywidgets."""
selection = ipyw.interactive(self.get_notes, {"manual":True}, organism=ipyw.Textarea(value='',\
placeholder='Organism imaged in this experiment.',description='Organism:',disabled=False),\
microscope=ipyw.Textarea(value='',placeholder='Microscope used in this experiment.',\
description='Microscope:',disabled=False),notes=ipyw.Textarea(value='',\
placeholder='General experiment notes.',description='Notes:',disabled=False),)
display(selection)
def assignidx(self,metadf):
"""Get indices for each image in each file (for metadata)
Args:
metadf (pandas.DataFrame): metadata without file indices
Returns:
outdf (pandas.DataFrame): metadata with file indices
"""
outdf = copy.deepcopy(metadf)
# get number of each dimension of the data
numchannels = len(pd.unique(metadf["channel"]))
numfovs = len(metadf.index.get_level_values("fov").unique())
timepoints_per_fov = len(metadf.index.get_level_values("timepoints").unique())
# Calculate number of files required for the number of timepoints
files_per_fov = (timepoints_per_fov//self.tpts_per_file) + 1
remainder = timepoints_per_fov%self.tpts_per_file
# Assign file indices to each individual image in a field of view
fov_file_idx = np.repeat(list(range(files_per_fov)), self.tpts_per_file*numchannels)[:-(self.tpts_per_file-remainder)*numchannels]
file_idx = np.concatenate([fov_file_idx+(fov_idx*files_per_fov) for fov_idx in range(numfovs)])
# Assign image indices within a file
fov_img_idx = np.repeat(np.repeat(np.array(list(range(self.tpts_per_file))), numchannels)[np.newaxis,:],files_per_fov,axis=0)
fov_img_idx = fov_img_idx.flatten()[:-(self.tpts_per_file-remainder)*numchannels]
img_idx = np.concatenate([fov_img_idx for fov_idx in range(numfovs)])
outdf["File Index"] = file_idx
outdf["Image Index"] = img_idx
return outdf
def writemetadata(self, parser, tiff_files, manual_metadata_params={}):
"""Write metadata.
Args:
parser (parser): compiled parser to find metadata
tiff_files (list, str): list of full paths to each tiff file
Returns:
channel_paths_by_file_index (list, tuple): Group files that represent multiple channels
for a single field of view
"""
fov_metadata = {}
exp_metadata = {}
assignment_metadata = {}
first_successful_file = True
for f in tiff_files:
match = parser.search(f)
# ignore any files that don't match the regex
if match is not None:
if first_successful_file:
# Build metadata
first_img = imread(f)
# get dimensions by loading file
exp_metadata["height"] = first_img.shape[0]
exp_metadata["width"] = first_img.shape[1]
exp_metadata["Organism"] = self.organism
exp_metadata["Microscope"] = self.microscope
exp_metadata["Notes"] = self.notes
self.chunk_shape = (1,exp_metadata["height"],exp_metadata["width"])
chunk_bytes = (2*np.multiply.accumulate(np.array(self.chunk_shape))[-1])
self.chunk_cache_mem_size = 2*chunk_bytes
exp_metadata["chunk_shape"],exp_metadata["chunk_cache_mem_size"] = (self.chunk_shape,self.chunk_cache_mem_size)
# get metadata from the file name
fov_metadata = dict([(key, [value]) for key, value in match.named.items()])
fov_metadata["Image Path"] = [f]
first_successful_file = False
else:
# Add to dictionary
fov_frame_dict = match.named
for key, value in fov_frame_dict.items():
fov_metadata[key].append(value)
fov_metadata["Image Path"].append(f)
if "lane" not in fov_metadata:
fov_metadata["lane"] = [1]*len(fov_metadata["Image Path"])
if "x" not in fov_metadata:
fov_metadata["x"] = [0]*len(fov_metadata["Image Path"])
if "y" not in fov_metadata:
fov_metadata["y"] = [0]*len(fov_metadata["Image Path"])
fov_metadata["t"] = fov_metadata["timepoints"]
# Convert dictionary to dataframe
fov_metadata = pd.DataFrame(fov_metadata)
exp_metadata["num_frames"] = len(pd.unique(fov_metadata["timepoints"]))
exp_metadata["channels"] = list( | pd.unique(fov_metadata["channel"]) | pandas.unique |
from typing import Any, Dict, List, Tuple
import pandas as pd
from dateutil.relativedelta import relativedelta
from django.db import connection
from django.db.models import F, Q, QuerySet
from django.db.models.expressions import Window
from django.db.models.functions import Lag
from django.utils.timezone import now
from posthog.api.element import ElementSerializer
from posthog.constants import SESSION_AVG, SESSION_DIST
from posthog.models import ElementGroup, Event, Filter, Team
from posthog.queries.base import BaseQuery, convert_to_comparison, determine_compared_filter
from posthog.queries.session_recording import add_session_recording_ids
from posthog.utils import append_data, dict_from_cursor_fetchall, friendly_time
SESSIONS_LIST_DEFAULT_LIMIT = 50
DIST_LABELS = [
"0 seconds (1 event)",
"0-3 seconds",
"3-10 seconds",
"10-30 seconds",
"30-60 seconds",
"1-3 minutes",
"3-10 minutes",
"10-30 minutes",
"30-60 minutes",
"1+ hours",
]
class Sessions(BaseQuery):
def run(self, filter: Filter, team: Team, *args, **kwargs) -> List[Dict[str, Any]]:
events = (
Event.objects.filter(team=team)
.filter(filter.properties_to_Q(team_id=team.pk))
.add_person_id(team.pk)
.order_by("-timestamp")
)
limit = int(kwargs.get("limit", SESSIONS_LIST_DEFAULT_LIMIT))
offset = filter.offset
calculated = []
# get compared period
if filter.compare and filter._date_from != "all" and filter.session_type == SESSION_AVG:
calculated = self.calculate_sessions(events.filter(filter.date_filter_Q), filter, team, limit, offset)
calculated = convert_to_comparison(calculated, filter, "current")
compare_filter = determine_compared_filter(filter)
compared_calculated = self.calculate_sessions(
events.filter(compare_filter.date_filter_Q), compare_filter, team, limit, offset
)
converted_compared_calculated = convert_to_comparison(compared_calculated, filter, "previous")
calculated.extend(converted_compared_calculated)
else:
# if session_type is None, it's a list of sessions which shouldn't have any date filtering
if filter.session_type is not None:
events = events.filter(filter.date_filter_Q)
calculated = self.calculate_sessions(events, filter, team, limit, offset)
return calculated
def calculate_sessions(
self, events: QuerySet, filter: Filter, team: Team, limit: int, offset: int
) -> List[Dict[str, Any]]:
# format date filter for session view
_date_gte = Q()
if filter.session_type is None:
# if _date_from is not explicitly set we only want to get the last day's worth of data
# otherwise the query is very slow
if filter._date_from and filter.date_to:
_date_gte = Q(timestamp__gte=filter.date_from, timestamp__lte=filter.date_to + relativedelta(days=1),)
else:
dt = now()
dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)
_date_gte = Q(timestamp__gte=dt, timestamp__lte=dt + relativedelta(days=1))
else:
if not filter.date_from:
filter._date_from = (
Event.objects.filter(team_id=team)
.order_by("timestamp")[0]
.timestamp.replace(hour=0, minute=0, second=0, microsecond=0)
.isoformat()
)
sessions = (
events.filter(_date_gte)
.annotate(
previous_timestamp=Window(
expression=Lag("timestamp", default=None),
partition_by=F("distinct_id"),
order_by=F("timestamp").asc(),
)
)
.annotate(
previous_event=Window(
expression=Lag("event", default=None), partition_by=F("distinct_id"), order_by=F("timestamp").asc(),
)
)
)
sessions_sql, sessions_sql_params = sessions.query.sql_with_params()
all_sessions = "\
SELECT *,\
SUM(new_session) OVER (ORDER BY distinct_id, timestamp) AS global_session_id,\
SUM(new_session) OVER (PARTITION BY distinct_id ORDER BY timestamp) AS user_session_id\
FROM (SELECT id, team_id, distinct_id, event, elements_hash, timestamp, properties, CASE WHEN EXTRACT('EPOCH' FROM (timestamp - previous_timestamp)) >= (60 * 30)\
OR previous_timestamp IS NULL \
THEN 1 ELSE 0 END AS new_session \
FROM ({}) AS inner_sessions\
) AS outer_sessions".format(
sessions_sql
)
result: List = []
if filter.session_type == SESSION_AVG:
result = self._session_avg(all_sessions, sessions_sql_params, filter)
elif filter.session_type == SESSION_DIST:
result = self._session_dist(all_sessions, sessions_sql_params)
else:
result = self._session_list(all_sessions, sessions_sql_params, team, filter, limit, offset)
return result
def _session_list(
self, base_query: str, params: Tuple[Any, ...], team: Team, filter: Filter, limit: int, offset: int
) -> List[Dict[str, Any]]:
session_list = """
SELECT
*
FROM (
SELECT
global_session_id,
properties,
start_time,
end_time,
length,
sessions.distinct_id,
event_count,
events
FROM (
SELECT
global_session_id,
count(1) as event_count,
MAX(distinct_id) as distinct_id,
EXTRACT('EPOCH' FROM (MAX(timestamp) - MIN(timestamp))) AS length,
MIN(timestamp) as start_time,
MAX(timestamp) as end_time,
array_agg(json_build_object( 'id', id, 'event', event, 'timestamp', timestamp, 'properties', properties, 'elements_hash', elements_hash) ORDER BY timestamp) as events
FROM
({base_query}) as count
GROUP BY 1
) as sessions
LEFT OUTER JOIN
posthog_persondistinctid ON posthog_persondistinctid.distinct_id = sessions.distinct_id AND posthog_persondistinctid.team_id = %s
LEFT OUTER JOIN
posthog_person ON posthog_person.id = posthog_persondistinctid.person_id
ORDER BY
start_time DESC
) as ordered_sessions
OFFSET %s
LIMIT %s
""".format(
base_query=base_query
)
with connection.cursor() as cursor:
params = params + (team.pk, offset, limit,)
cursor.execute(session_list, params)
sessions = dict_from_cursor_fetchall(cursor)
hash_ids = []
for session in sessions:
for event in session["events"]:
if event.get("elements_hash"):
hash_ids.append(event["elements_hash"])
groups = self._prefetch_elements(hash_ids, team)
for session in sessions:
for event in session["events"]:
try:
event.update(
{
"elements": ElementSerializer(
[group for group in groups if group.hash == event["elements_hash"]][0]
.element_set.all()
.order_by("order"),
many=True,
).data
}
)
except IndexError:
event.update({"elements": []})
return add_session_recording_ids(team, sessions)
def _session_avg(self, base_query: str, params: Tuple[Any, ...], filter: Filter) -> List[Dict[str, Any]]:
def _determineInterval(interval):
if interval == "minute":
return (
"minute",
"min",
)
elif interval == "hour":
return "hour", "H"
elif interval == "week":
return "week", "W"
elif interval == "month":
return "month", "M"
else:
return "day", "D"
interval, interval_freq = _determineInterval(filter.interval)
average_length_time = "SELECT date_trunc('{interval}', timestamp) as start_time,\
AVG(length) AS average_session_length_per_day,\
SUM(length) AS total_session_length_per_day, \
COUNT(1) as num_sessions_per_day\
FROM (SELECT global_session_id, EXTRACT('EPOCH' FROM (MAX(timestamp) - MIN(timestamp)))\
AS length,\
MIN(timestamp) as timestamp FROM ({}) as count GROUP BY 1) as agg group by 1 order by start_time".format(
base_query, interval=interval
)
cursor = connection.cursor()
cursor.execute(average_length_time, params)
time_series_avg = cursor.fetchall()
if len(time_series_avg) == 0:
return []
date_range = pd.date_range(filter.date_from, filter.date_to, freq=interval_freq,)
df = pd.DataFrame([{"date": a[0], "count": a[1], "breakdown": "Total"} for a in time_series_avg])
if interval == "week":
df["date"] = df["date"].apply(lambda x: x - | pd.offsets.Week(weekday=6) | pandas.offsets.Week |
import pandas as pd
import numpy as np
import json
import csv
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm_notebook as tqdm
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize
from nltk.stem import WordNetLemmatizer
import nltk
nltk.download('averaged_perceptron_tagger')
import spacy
import math
import string
import sys
import random
from collections import Counter
from itertools import chain
stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
ge=pd.read_csv('./testgoodwordse.csv')
gn=pd.read_csv('./testgoodwordsn.csv')
gc=pd.read_csv('./testgoodwordsc.csv')
be=pd.read_csv('./testbadwordse.csv')
bn=pd.read_csv('./testbadwordsn.csv')
bc=pd.read_csv('./testbadwordsc.csv')
glabel=pd.read_csv('./testgoodwords.csv')
blabel=pd.read_csv('./testbadwords.csv')
nlp = spacy.load("en_trf_bertbaseuncased_lg")
def find_ngrams(input_list, n):
return list(zip(*[input_list[i:] for i in range(n)]))
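# Hedged worked example: find_ngrams(["a", "b", "c", "d"], 2) returns
# [("a", "b"), ("b", "c"), ("c", "d")].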
data = blabel['fullnopunc'].to_list()
df = | pd.DataFrame(data,columns=['sentence']) | pandas.DataFrame |
import pandas as pd
def evaluate_month(df, month):
mask = (df['AAAA-MM'] == month)
df_month = df[mask]
df_month = df_month[['Concepto', 'Débito', 'Crédito']]
moves = df_month.groupby(by=['Concepto']).sum()
debit = moves['Débito'].sum() / 100.0
credit = moves['Crédito'].sum() / 100.0
moves = moves / 100.0
return debit, credit, moves, df_month
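# Illustrative usage sketch (the file name and month below are assumptions, not values from this file);
# amounts come back divided by 100 because load_Patagonia stores them as integer cents:
# df = load_Patagonia('Patagonia.xlsx')
# debit, credit, moves, df_month = evaluate_month(df, '2021-03')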
def load_Patagonia(filename):
df = pd.read_excel(filename,
skiprows=4,
skipfooter=1,
usecols=[1, 2, 4, 6, 8]).fillna(value=0)
df.columns = ['Fecha', 'Concepto', 'Débito', 'Crédito', 'Saldo']
df['Concepto'] = df['Concepto'].str.replace('\n',' ')
df['Débito'] = (df['Débito']*100).astype('int32')
df['Crédito'] = (df['Crédito']*100).astype('int32')
df['Saldo'] = (df['Saldo']*100).astype('int32')
df['AAAA-MM'] = | pd.to_datetime(df['Fecha']) | pandas.to_datetime |
"""
WEEK 02: Classification Project with Titanic Data
This program will run a set of steps to calculate the survival predictions of Titanic passengers using Classification Models
For more details on EDA and model selection, please check the jupyter notebook version of this program (week02_project.ipynb).
###Step 1: Load Data
###Step 2: Train-Test Split (df_train and df_test)
###Step 3: Feature Engineering on df_train and df_test
###Step 4: Train Models (Logistic Reg, Random Forest) + Cross validation
###Step 5: Make predictions for Titanic Kaggle challenge and save results in a csv file
"""
# Packages
import pandas as pd
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder, StandardScaler, PolynomialFeatures
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.api import OLS, add_constant
from sklearn.feature_selection import RFE
import statsmodels.api as sm
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn.metrics import plot_roc_curve, auc, roc_curve
from sklearn.model_selection import cross_val_score
pd.options.mode.chained_assignment = None
# Functions
def clean_data(df):
df['Age'].fillna(df['Age'].mean(), inplace=True)
df['Age'] = | pd.to_numeric(df['Age']) | pandas.to_numeric |
# -*- coding: utf-8 -*-
import os
from typing import IO
import pandas as pd
from PySDDP.dessem.script.templates.restseg import RestsegTemplate
COMENTARIO = '&'
CABECALHO = 'X'
class Restseg(RestsegTemplate):
"""
    Class containing all the elements common to any version of the Dessem Restseg file.
    This class is intended to provide duck typing for the Dessem class and to add a level of specification
    inside the factory. It also passes on the responsibility for implementing the read and write
    methods.
"""
def __init__(self):
super().__init__()
self.tabseg_indice = dict()
self.tabseg_tabela = dict()
self.tabseg_limite = dict()
self.tabseg_celula = dict()
self.tabseg_indice_df: pd.DataFrame()
self.tabseg_tabela_df: pd.DataFrame()
self.tabseg_limite_df: pd.DataFrame()
self.tabseg_celula_df: pd.DataFrame()
self.restseg = None
self._comentarios_ = None
def ler(self, file_name: str) -> None:
"""
        Method for reading the file with the security constraints represented by tables
        User Manual III.2: file containing information about the security limits for the electrical network,
        provided as tables (RESTSEG.XXX).
        :param file_name: string with the full path to the file
:return:
"""
dir_base = os.path.split(file_name)[0]
        # Lists for TABSEG INDICE
self.tabseg_indice['mneumo'] = list()
self.tabseg_indice['num'] = list()
self.tabseg_indice['descricao'] = list()
# Listas referentes a TABSEG TABELA
self.tabseg_tabela['mneumo'] = list()
self.tabseg_tabela['num1'] = list()
self.tabseg_tabela['tipo1'] = list()
self.tabseg_tabela['tipo2'] = list()
self.tabseg_tabela['num2'] = list()
self.tabseg_tabela['carg'] = list()
        # Lists for TABSEG LIMITE
self.tabseg_limite['mneumo'] = list()
self.tabseg_limite['num'] = list()
self.tabseg_limite['var_parm_1'] = list()
self.tabseg_limite['var_parm_2'] = list()
self.tabseg_limite['var_parm_3'] = list()
        # Lists for TABSEG CELULA
self.tabseg_celula['mneumo'] = list()
self.tabseg_celula['num'] = list()
self.tabseg_celula['limite'] = list()
self.tabseg_celula['f'] = list()
self.tabseg_celula['par_1_inf'] = list()
self.tabseg_celula['par_1_sup'] = list()
self.tabseg_celula['par_2_inf'] = list()
self.tabseg_celula['par_2_sup'] = list()
self.tabseg_celula['par_3_inf'] = list()
self.tabseg_celula['par_3_sup'] = list()
self.restseg = list()
self._comentarios_ = list()
# noinspection PyBroadException
try:
with open(file_name, 'r', encoding='latin-1') as f: # type: IO[str]
                # Follow the user manual
continua = True
while continua:
self.next_line(f)
linha = self.linha.strip()
                    # If the line is a comment, do nothing and skip to the next line
if linha[0] == COMENTARIO:
self._comentarios_.append(linha)
self.restseg.append(linha)
continue
if linha[0] == CABECALHO:
self.restseg.append(linha)
continue
mneumo = linha[:13].strip().lower()
self.restseg.append(linha[:13])
                    # Read the data according to the corresponding mnemonic
if mneumo == 'tabseg indice':
self.tabseg_indice['mneumo'].append(self.linha[:13])
self.tabseg_indice['num'].append(self.linha[14:19])
self.tabseg_indice['descricao'].append(self.linha[20:80])
self.dados['tabseg_indice']['valores'] = self.tabseg_indice
self.tabseg_indice_df = pd.DataFrame(self.tabseg_indice)
continue
if mneumo == 'tabseg tabela':
self.tabseg_tabela['mneumo'].append(self.linha[:13])
self.tabseg_tabela['num1'].append(self.linha[14:19])
self.tabseg_tabela['tipo1'].append(self.linha[20:26])
self.tabseg_tabela['tipo2'].append(self.linha[27:33])
self.tabseg_tabela['num2'].append(self.linha[34:39])
self.tabseg_tabela['carg'].append(self.linha[40:45])
self.dados['tabseg_tabela']['valores'] = self.tabseg_tabela
self.tabseg_tabela_df = | pd.DataFrame(self.tabseg_tabela) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from abc import ABC
from flask import Flask, request
import json
from utils.destinations import destinations
import swapi
from preprocess.preprocess_starship_data import PreProcessing
from starships.recommender_system import StarShipRecommendation
import pandas as pd
import os
import logging
from flask_script import Manager
# POST: /api/starships/recommend
# =====================
# --------------------------------------------------
# Content-Type: application/json
#
# { "id": 6 }
# --------------------------------------------------
# Example response:
# --------------------------------------------------
# Content-Type: application/json
# curl -H "Content-Type: application/json" -X POST http://0.0.0.0:5000/api/starships/recommend -d '{"id":9}'
# { "alternatives": [ {starship 1}, {starship 2}, {starship 3} ...] }
# --------------------------------------------------
""" Write your API endpoint here """
# GET: /api/starships/
# =====================
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
app = Flask(__name__)
@app.before_first_request
def __cosine_similartie__():
logger.info("Checking for cosine similarity csv")
if not os.path.exists("cosine_similarities.csv"):
logger.debug("CSV not found, starting process")
preprocessor = PreProcessing("starships")
cosine_sim = StarShipRecommendation()
logger.info("Initialising pre-processing ")
data = preprocessor.run_preprocessing().set_index("name", drop=True)
cosine_sim = cosine_sim.cosine_similarity_table(data.drop(labels=["starship_id"], axis=1),
data.drop(labels=["starship_id"], axis=1))
df = | pd.DataFrame(cosine_sim, columns=data.index) | pandas.DataFrame |
""" Almost everything related to Telemanom is done in this module.
Telemanom is a framework for using LSTMs to detect anomalies in multivariate time series data, introduced by [Hundman et al., 2018].
Most of the code below is a modified version of their code, released under an Apache 2.0 license.
The corresponding license text is at the end of this file.
Source: https://github.com/khundman/telemanom
Paper: <NAME>, Laporte, <NAME>. Detecting Spacecraft Anomalies Using LSTMs and Nonparametric Dynamic Thresholding. KDD '18: Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, July 2018, pages 387–395. https://arxiv.org/abs/1802.04431
"""
from datetime import datetime as dt
from keras.callbacks import History, EarlyStopping, Callback
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential, load_model
import json
import logging
import more_itertools as mit
import numpy as np
import os
import pandas as pd
import sys
import yaml
import argparse
import traceback
import launch_utils
import dataset_preprocessing as dsp
from helper_funcs import append_logs, exit7
name4logs = "lib_telemanom_calc"
logger = logging.getLogger('telemanom')
# suppress tensorflow CPU speedup warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# --------------------------------------CHANNEL------------------------
class Channel:
def __init__(self, config, chan_id):
"""
Load and reshape channel values (predicted and actual).
Args:
config (obj): Config object containing parameters for processing
chan_id (str): channel id
Attributes:
id (str): channel id
config (obj): see Args
X_train (arr): training inputs with dimensions
[timesteps, l_s, input dimensions)
X_test (arr): test inputs with dimensions
[timesteps, l_s, input dimensions)
y_train (arr): actual channel training values with dimensions
[timesteps, n_predictions, 1)
y_test (arr): actual channel test values with dimensions
[timesteps, n_predictions, 1)
train (arr): train data
test(arr): test data
scale_lower (float) = None
scale_upper (float) = None
"""
self.id = chan_id
self.config = config
self.X_train = None
self.y_train = None
self.X_test = None
self.y_test = None
self.y_hat = None
self.train = None
self.test = None
self.scale_lower = None
self.scale_upper = None
self.bad_data = False
def shape_data(self, arr, train=True):
"""Shape raw input streams for ingestion into LSTM. config.l_s specifies
the sequence length of prior timesteps fed into the model at
each timestep t.
Args:
arr (np array): array of input streams with
dimensions [timesteps, 1, input dimensions]
train (bool): If shaping training data, this indicates
data can be shuffled
"""
data = []
# TODO: check for cases where arr is too short, making the range arg negative
len_for_range = len(arr) - self.config.l_s - self.config.n_predictions
if len_for_range > 0:
for i in range(len_for_range):
data.append(arr[i:i + self.config.l_s + self.config.n_predictions])
data = np.array(data)
assert len(data.shape) == 3
if train:
np.random.shuffle(data)
self.X_train = data[:, :-self.config.n_predictions, :]
self.y_train = data[:, -self.config.n_predictions:, 0] # telemetry value is at position 0
else:
self.X_test = data[:, :-self.config.n_predictions, :]
self.y_test = data[:, -self.config.n_predictions:, 0] # telemetry value is at position 0
else:
msg = "Caution: len_for_range is <= 0. Usually nothing to worry about, as the input-data scaling process could produse dataframes too small for LSTM"
append_logs(msg, name4logs, "always", "print")
self.X_train = None
self.y_train = None
self.X_test = None
self.y_test = None
self.bad_data = True
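    # Worked example of the shaping above (illustrative config values): with l_s=250 and
    # n_predictions=10, a stream of 1000 rows yields 1000 - 250 - 10 = 740 overlapping windows of
    # length 260; X keeps the first 250 rows of each window and y the last 10 values of column 0.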
def load_data(self, observations_for_inference=None, scaling_factors=None, training_datapoints=None):
"""
Load train and test data from local.
"""
try:
if observations_for_inference is not None:
append_logs("inference data is from RAM", name4logs, "always", "print")
all_data_df = observations_for_inference
if training_datapoints is not None:
append_logs("training data is from RAM", name4logs, "always", "print")
all_data_df = training_datapoints
msg = "all_data_df.tail:\n" + str(all_data_df.tail())
append_logs(msg, name4logs, "always", "print")
            # TODO: check for the case where there is no such column
raw_df = all_data_df[[self.id]]
msg = "Number of datapoints for " + str(self.id) + " :" + str(len(raw_df.index))
append_logs(msg, name4logs, "always", "print")
msg = "scaling_factors:" + str(scaling_factors)
append_logs(msg, name4logs, "always", "print")
one_channel_df, scale_lower, scale_upper = dsp.normilize_single_channel_df(raw_df, scaling_factors)
c = []
c.extend(range(0, 24))
c = [str(i) for i in c]
one_channel_df = one_channel_df.assign(**dict.fromkeys(c, 0))
channel_np = one_channel_df.to_numpy()
if observations_for_inference is None: # = will train, not infer
train_np = channel_np
self.train = train_np
self.shape_data(self.train)
else: # = will infer, not train
test_np = channel_np
self.test = test_np
self.shape_data(self.test, train=False)
self.scale_lower = scale_lower
self.scale_upper = scale_upper
except Exception as e:
msg = "Exception in def load_data(self): " + str(e) + " " + str(traceback.print_exc())
append_logs(msg, name4logs, "always", "print")
logger.critical(e)
logger.critical(msg)
# ---------------------------- ERRORS-------------------------------
class Errors:
def __init__(self, channel, config, run_id):
"""
Batch processing of errors between actual and predicted values
for a channel.
Args:
channel (obj): Channel class object containing train/test data
for X,y for a single channel
config (obj): Config object containing parameters for processing
run_id (str): Datetime referencing set of predictions in use
Attributes:
config (obj): see Args
window_size (int): number of trailing batches to use in error
calculation
n_windows (int): number of windows in test values for channel
i_anom (arr): indices of anomalies in channel test values
E_seq (arr of tuples): array of (start, end) indices for each
continuous anomaly sequence in test values
anom_scores (arr): score indicating relative severity of each
anomaly sequence in E_seq
e (arr): errors in prediction (predicted - actual)
e_s (arr): exponentially-smoothed errors in prediction
normalized (arr): prediction errors as a percentage of the range
of the channel values
"""
self.config = config
self.window_size = self.config.window_size
self.i_anom = np.array([])
self.E_seq = []
self.anom_scores = []
if not channel.bad_data:
self.n_windows = int((channel.y_test.shape[0] -
(self.config.batch_size * self.window_size))
/ self.config.batch_size)
# raw prediction error
self.e = [abs(y_h - y_t[0]) for y_h, y_t in
zip(channel.y_hat, channel.y_test)]
smoothing_window = int(self.config.batch_size * self.config.window_size
* self.config.smoothing_perc)
if not len(channel.y_hat) == len(channel.y_test):
raise ValueError('len(y_hat) != len(y_test): {}, {}'
.format(len(channel.y_hat), len(channel.y_test)))
# smoothed prediction error
self.e_s = pd.DataFrame(self.e).ewm(span=smoothing_window) \
.mean().values.flatten()
# for values at beginning < sequence length, just use avg
if not channel.id == 'C-2': # anomaly occurs early in window
self.e_s[:self.config.l_s] = \
[np.mean(self.e_s[:self.config.l_s * 2])] * self.config.l_s
self.normalized = np.mean(self.e / np.ptp(channel.y_test))
logger.info("normalized prediction error: {0:.2f}"
.format(self.normalized))
else:
self.n_windows = None
self.e = None
self.e_s = None
self.normalized = None
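    # Note on the smoothing above (example numbers only): with batch_size=70, window_size=30 and
    # smoothing_perc=0.05, the exponential smoothing span is int(70 * 30 * 0.05) = 105 points.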
def get_raw_prediction_errors(self):
return self.e
def adjust_window_size(self, channel):
"""
Decrease the historical error window size (h) if number of test
values is limited.
Args:
channel (obj): Channel class object containing train/test data
for X,y for a single channel
"""
if not channel.bad_data:
while self.n_windows < 0:
self.window_size -= 1
self.n_windows = int((channel.y_test.shape[0]
- (self.config.batch_size * self.window_size))
/ self.config.batch_size)
if self.window_size == 1 and self.n_windows < 0:
raise ValueError('Batch_size ({}) larger than y_test (len={}). '
'Adjust in config.yaml.'
.format(self.config.batch_size,
channel.y_test.shape[0]))
def merge_scores(self):
"""
If anomalous sequences from subsequent batches are adjacent they
will automatically be combined. This combines the scores for these
initial adjacent sequences (scores are calculated as each batch is
processed) where applicable.
"""
merged_scores = []
score_end_indices = []
for i, score in enumerate(self.anom_scores):
if not score['start_idx'] - 1 in score_end_indices:
merged_scores.append(score['score'])
score_end_indices.append(score['end_idx'])
def process_batches(self, channel):
"""
Top-level function for the Error class that loops through batches
of values for a channel.
Args:
channel (obj): Channel class object containing train/test data
for X,y for a single channel
"""
if not channel.bad_data:
self.adjust_window_size(channel)
for i in range(0, self.n_windows + 1):
prior_idx = i * self.config.batch_size
idx = (self.config.window_size * self.config.batch_size) \
+ (i * self.config.batch_size)
if i == self.n_windows:
idx = channel.y_test.shape[0]
window = ErrorWindow(channel, self.config, prior_idx, idx, self, i)
window.find_epsilon()
window.find_epsilon(inverse=True)
window.compare_to_epsilon(self)
window.compare_to_epsilon(self, inverse=True)
if len(window.i_anom) == 0 and len(window.i_anom_inv) == 0:
continue
window.prune_anoms()
window.prune_anoms(inverse=True)
if len(window.i_anom) == 0 and len(window.i_anom_inv) == 0:
continue
window.i_anom = np.sort(np.unique(
np.append(window.i_anom, window.i_anom_inv))).astype('int')
window.score_anomalies(prior_idx)
# update indices to reflect true indices in full set of values
self.i_anom = np.append(self.i_anom, window.i_anom + prior_idx)
self.anom_scores = self.anom_scores + window.anom_scores
if len(self.i_anom) > 0:
# group anomalous indices into continuous sequences
groups = [list(group) for group in
mit.consecutive_groups(self.i_anom)]
self.E_seq = [(int(g[0]), int(g[-1])) for g in groups
if not g[0] == g[-1]]
# additional shift is applied to indices so that they represent the
# position in the original data array, obtained from the files,
# and not the position on y_test (See PR #27).
self.E_seq = [(e_seq[0] + self.config.l_s,
e_seq[1] + self.config.l_s) for e_seq in self.E_seq]
self.merge_scores()
class ErrorWindow:
def __init__(self, channel, config, start_idx, end_idx, errors, window_num):
"""
Data and calculations for a specific window of prediction errors.
Includes finding thresholds, pruning, and scoring anomalous sequences
for errors and inverted errors (flipped around mean) - significant drops
in values can also be anomalous.
Args:
channel (obj): Channel class object containing train/test data
for X,y for a single channel
config (obj): Config object containing parameters for processing
start_idx (int): Starting index for window within full set of
channel test values
end_idx (int): Ending index for window within full set of channel
test values
errors (arr): Errors class object
window_num (int): Current window number within channel test values
Attributes:
i_anom (arr): indices of anomalies in window
i_anom_inv (arr): indices of anomalies in window of inverted
telemetry values
E_seq (arr of tuples): array of (start, end) indices for each
continuous anomaly sequence in window
E_seq_inv (arr of tuples): array of (start, end) indices for each
continuous anomaly sequence in window of inverted telemetry
values
non_anom_max (float): highest smoothed error value below epsilon
non_anom_max_inv (float): highest smoothed error value below
epsilon_inv
config (obj): see Args
anom_scores (arr): score indicating relative severity of each
anomaly sequence in E_seq within a window
window_num (int): see Args
sd_lim (int): default number of standard deviations to use for
threshold if no winner or too many anomalous ranges when scoring
candidate thresholds
sd_threshold (float): number of standard deviations for calculation
of best anomaly threshold
sd_threshold_inv (float): same as above for inverted channel values
e_s (arr): exponentially-smoothed prediction errors in window
e_s_inv (arr): inverted e_s
sd_e_s (float): standard deviation of e_s
mean_e_s (float): mean of e_s
epsilon (float): threshold for e_s above which an error is
considered anomalous
epsilon_inv (float): threshold for inverted e_s above which an error
is considered anomalous
y_test (arr): Actual telemetry values for window
sd_values (float): st dev of y_test
perc_high (float): the 95th percentile of y_test values
perc_low (float): the 5th percentile of y_test values
inter_range (float): the range between perc_high - perc_low
num_to_ignore (int): number of values to ignore initially when
looking for anomalies
"""
self.i_anom = np.array([])
self.E_seq = np.array([])
self.non_anom_max = -1000000
self.i_anom_inv = np.array([])
self.E_seq_inv = np.array([])
self.non_anom_max_inv = -1000000
self.config = config
self.anom_scores = []
self.window_num = window_num
self.sd_lim = 12.0
self.sd_threshold = self.sd_lim
self.sd_threshold_inv = self.sd_lim
if not channel.bad_data:
self.e_s = errors.e_s[start_idx:end_idx]
self.mean_e_s = np.mean(self.e_s)
self.sd_e_s = np.std(self.e_s)
self.e_s_inv = np.array([self.mean_e_s + (self.mean_e_s - e)
for e in self.e_s])
self.epsilon = self.mean_e_s + self.sd_lim * self.sd_e_s
self.epsilon_inv = self.mean_e_s + self.sd_lim * self.sd_e_s
self.y_test = channel.y_test[start_idx:end_idx]
self.sd_values = np.std(self.y_test)
self.perc_high, self.perc_low = np.percentile(self.y_test, [95, 5])
self.inter_range = self.perc_high - self.perc_low
# ignore initial error values until enough history for processing
self.num_to_ignore = self.config.l_s * 2
# if y_test is small, ignore fewer
if len(channel.y_test) < 2500:
self.num_to_ignore = self.config.l_s
if len(channel.y_test) < 1800:
self.num_to_ignore = 0
def find_epsilon(self, inverse=False):
"""
Find the anomaly threshold that maximizes function representing
tradeoff between:
a) number of anomalies and anomalous ranges
b) the reduction in mean and st dev if anomalous points are removed
from errors
(see https://arxiv.org/pdf/1802.04431.pdf)
Args:
inverse (bool): If true, epsilon is calculated for inverted errors
"""
e_s = self.e_s if not inverse else self.e_s_inv
max_score = -10000000
for z in np.arange(2.5, self.sd_lim, 0.5):
epsilon = self.mean_e_s + (self.sd_e_s * z)
pruned_e_s = e_s[e_s < epsilon]
i_anom = np.argwhere(e_s >= epsilon).reshape(-1, )
buffer = np.arange(1, self.config.error_buffer)
i_anom = np.sort(np.concatenate((i_anom,
np.array([i + buffer for i in i_anom])
.flatten(),
np.array([i - buffer for i in i_anom])
.flatten())))
i_anom = i_anom[(i_anom < len(e_s)) & (i_anom >= 0)]
i_anom = np.sort(np.unique(i_anom))
if len(i_anom) > 0:
# group anomalous indices into continuous sequences
groups = [list(group) for group
in mit.consecutive_groups(i_anom)]
E_seq = [(g[0], g[-1]) for g in groups if not g[0] == g[-1]]
mean_perc_decrease = (self.mean_e_s - np.mean(pruned_e_s)) \
/ self.mean_e_s
sd_perc_decrease = (self.sd_e_s - np.std(pruned_e_s)) \
/ self.sd_e_s
score = (mean_perc_decrease + sd_perc_decrease) \
/ (len(E_seq) ** 2 + len(i_anom))
# sanity checks / guardrails
if score >= max_score and len(E_seq) <= 5 and \
len(i_anom) < (len(e_s) * 0.5):
max_score = score
if not inverse:
self.sd_threshold = z
self.epsilon = self.mean_e_s + z * self.sd_e_s
else:
self.sd_threshold_inv = z
self.epsilon_inv = self.mean_e_s + z * self.sd_e_s
def compare_to_epsilon(self, errors_all, inverse=False):
"""
Compare smoothed error values to epsilon (error threshold) and group
consecutive errors together into sequences.
Args:
:param errors_all: Errors class object containing list of all
previously identified anomalies in test set
:param inverse: a boolean
"""
e_s = self.e_s if not inverse else self.e_s_inv
epsilon = self.epsilon if not inverse else self.epsilon_inv
# Check: scale of errors compared to values too small?
if not (self.sd_e_s > (.05 * self.sd_values) or max(self.e_s)
> (.05 * self.inter_range)) or not max(self.e_s) > 0.05:
return
i_anom = np.argwhere((e_s >= epsilon) &
(e_s > 0.05 * self.inter_range)).reshape(-1, )
if len(i_anom) == 0:
return
buffer = np.arange(1, self.config.error_buffer + 1)
i_anom = np.sort(np.concatenate((i_anom,
np.array([i + buffer for i in i_anom])
.flatten(),
np.array([i - buffer for i in i_anom])
.flatten())))
i_anom = i_anom[(i_anom < len(e_s)) & (i_anom >= 0)]
# if it is first window, ignore initial errors (need some history)
if self.window_num == 0:
i_anom = i_anom[i_anom >= self.num_to_ignore]
else:
i_anom = i_anom[i_anom >= len(e_s) - self.config.batch_size]
i_anom = np.sort(np.unique(i_anom))
# capture max of non-anomalous values below the threshold
# (used in filtering process)
batch_position = self.window_num * self.config.batch_size
window_indices = np.arange(0, len(e_s)) + batch_position
adj_i_anom = i_anom + batch_position
window_indices = np.setdiff1d(window_indices,
np.append(errors_all.i_anom, adj_i_anom))
candidate_indices = np.unique(window_indices - batch_position)
non_anom_max = np.max(np.take(e_s, candidate_indices))
# group anomalous indices into continuous sequences
groups = [list(group) for group in mit.consecutive_groups(i_anom)]
E_seq = [(g[0], g[-1]) for g in groups if not g[0] == g[-1]]
if inverse:
self.i_anom_inv = i_anom
self.E_seq_inv = E_seq
self.non_anom_max_inv = non_anom_max
else:
self.i_anom = i_anom
self.E_seq = E_seq
self.non_anom_max = non_anom_max
def prune_anoms(self, inverse=False):
"""
Remove anomalies that don't meet minimum separation from the next
closest anomaly or error value
Args:
inverse (bool): If true, epsilon is calculated for inverted errors
"""
e_seq = self.E_seq if not inverse else self.E_seq_inv
e_s = self.e_s if not inverse else self.e_s_inv
non_anom_max = self.non_anom_max if not inverse \
else self.non_anom_max_inv
if len(e_seq) == 0:
return
e_seq_max = np.array([max(e_s[e[0]:e[1] + 1]) for e in e_seq])
e_seq_max_sorted = np.sort(e_seq_max)[::-1]
e_seq_max_sorted = np.append(e_seq_max_sorted, [non_anom_max])
i_to_remove = np.array([])
for i in range(0, len(e_seq_max_sorted) - 1):
if (e_seq_max_sorted[i] - e_seq_max_sorted[i + 1]) \
/ e_seq_max_sorted[i] < self.config.p:
i_to_remove = np.append(i_to_remove, np.argwhere(
e_seq_max == e_seq_max_sorted[i]))
else:
i_to_remove = np.array([])
i_to_remove[::-1].sort()
if len(i_to_remove) > 0:
e_seq = np.delete(e_seq, i_to_remove, axis=0)
if len(e_seq) == 0 and inverse:
self.i_anom_inv = np.array([])
return
elif len(e_seq) == 0 and not inverse:
self.i_anom = np.array([])
return
indices_to_keep = np.concatenate([range(e_seq[0], e_seq[-1] + 1)
for e_seq in e_seq])
if not inverse:
mask = np.isin(self.i_anom, indices_to_keep)
self.i_anom = self.i_anom[mask]
else:
mask_inv = np.isin(self.i_anom_inv, indices_to_keep)
self.i_anom_inv = self.i_anom_inv[mask_inv]
def score_anomalies(self, prior_idx):
"""
Calculate anomaly scores based on max distance from epsilon
for each anomalous sequence.
Args:
prior_idx (int): starting index of window within full set of test
values for channel
"""
groups = [list(group) for group in mit.consecutive_groups(self.i_anom)]
for e_seq in groups:
score_dict = {
"start_idx": e_seq[0] + prior_idx,
"end_idx": e_seq[-1] + prior_idx,
"score": 0
}
score = max([abs(self.e_s[i] - self.epsilon)
/ (self.mean_e_s + self.sd_e_s) for i in
range(e_seq[0], e_seq[-1] + 1)])
inv_score = max([abs(self.e_s_inv[i] - self.epsilon_inv)
/ (self.mean_e_s + self.sd_e_s) for i in
range(e_seq[0], e_seq[-1] + 1)])
# the max score indicates whether anomaly was from regular
# or inverted errors
score_dict['score'] = max([score, inv_score])
self.anom_scores.append(score_dict)
# -----------------------------------HELPERS -------------------------
class Config:
"""Loads parameters from config.yaml into global object
"""
def __init__(self, path_to_config):
self.path_to_config = path_to_config
if os.path.isfile(path_to_config):
pass
else:
self.path_to_config = 'config/{}'.format(self.path_to_config)
with open(self.path_to_config, "r") as f:
self.dictionary = yaml.load(f.read(), Loader=yaml.FullLoader)
for k, v in self.dictionary.items():
setattr(self, k, v)
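    # Configuration keys read elsewhere in this module via self.config.<key> (listed here for
    # reference, not an exhaustive schema): train, predict, use_id, l_s, n_predictions, batch_size,
    # lstm_batch_size, window_size, smoothing_perc, error_buffer, p, patience, min_delta, layers,
    # dropout, loss_metric, optimizer, epochs, validation_split.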
def build_group_lookup(self, path_to_groupings):
channel_group_lookup = {}
with open(path_to_groupings, "r") as f:
groupings = json.loads(f.read())
for subsystem in groupings.keys():
for subgroup in groupings[subsystem].keys():
for chan in groupings[subsystem][subgroup]:
channel_group_lookup[chan["key"]] = {}
channel_group_lookup[chan["key"]]["subsystem"] = subsystem
channel_group_lookup[chan["key"]]["subgroup"] = subgroup
return channel_group_lookup
def setup_logging():
    '''Configure the logging object used to track parameter settings, training, and evaluation.
    Returns:
        logger (obj): Logging object
    '''
logger_obj = logging.getLogger('telemanom')
logger_obj.setLevel(logging.INFO)
stdout = logging.StreamHandler(sys.stdout)
stdout.setLevel(logging.INFO)
logger_obj.addHandler(stdout)
return logger_obj
# -------------------------------------MODELLING-------------------------
class Model:
def __init__(self, config, run_id, channel, single_channel_model=None):
"""
Loads/trains RNN and predicts future telemetry values for a channel.
Args:
config (obj): Config object containing parameters for processing
and model training
run_id (str): Datetime referencing set of predictions in use
channel (obj): Channel class object containing train/test data
for X,y for a single channel
Attributes:
config (obj): see Args
chan_id (str): channel id
run_id (str): see Args
y_hat (arr): predicted channel values
model (obj): trained RNN model for predicting channel values
"""
self.config = config
self.chan_id = channel.id
self.run_id = run_id
self.y_hat = np.array([])
self.model = None
self.scale_lower = channel.scale_lower
self.scale_upper = channel.scale_upper
self.single_channel_model = single_channel_model
msg = "self.config.train: " + str(self.config.train)
append_logs(msg, name4logs, "always", "print")
if not self.config.train:
try:
self.load_from_ram()
except Exception as e:
msg = "Exception in class Model:" + str(e) + " " + str(traceback.print_exc())
append_logs(msg, name4logs, "always", "print")
self.train_new(channel)
# self.save()
else:
self.train_new(channel)
# self.save()
def load_from_ram(self):
self.model = self.single_channel_model.model
append_logs("loaded model from RAM", name4logs, "always", "print")
def train_new(self, channel):
"""
Train LSTM model according to specifications in config.yaml.
Args:
channel (obj): Channel class object containing train/test data
for X,y for a single channel
"""
if not channel.bad_data:
cbs = [History(), EarlyStopping(monitor='val_loss',
patience=self.config.patience,
min_delta=self.config.min_delta,
verbose=0)]
self.model = Sequential()
self.model.add(LSTM(
self.config.layers[0],
input_shape=(None, channel.X_train.shape[2]),
return_sequences=True))
self.model.add(Dropout(self.config.dropout))
self.model.add(LSTM(
self.config.layers[1],
return_sequences=False))
self.model.add(Dropout(self.config.dropout))
self.model.add(Dense(
self.config.n_predictions))
self.model.add(Activation('linear'))
self.model.compile(loss=self.config.loss_metric,
optimizer=self.config.optimizer)
self.model.fit(channel.X_train,
channel.y_train,
batch_size=self.config.lstm_batch_size,
epochs=self.config.epochs,
validation_split=self.config.validation_split,
callbacks=cbs,
verbose=True)
# def save(self):
# """
# Save trained model.
# """
# self.model.save(os.path.join('data', self.run_id, 'models',
# '{}.h5'.format(self.chan_id)))
# self.model.save(os.path.join('pickled_models', 'telemanom',
# '{}.h5'.format(self.chan_id)))
# with open(os.path.join("pickled_models", "telemanom", "{}_data_scale.txt".format(self.chan_id)), "w") as f:
# f.write(str(self.scale_lower) + "\n")
# f.write(str(self.scale_upper) + "\n")
def return_model(self):
return self.model
def aggregate_predictions(self, y_hat_batch, method='first'):
"""
Aggregates predictions for each timestep. When predicting n steps
ahead where n > 1, will end up with multiple predictions for a
timestep.
Args:
            y_hat_batch (arr): predictions shape (<batch length>, <n_preds>)
method (string): indicates how to aggregate for a timestep - "first"
or "mean"
"""
agg_y_hat_batch = np.array([])
for t in range(len(y_hat_batch)):
start_idx = t - self.config.n_predictions
start_idx = start_idx if start_idx >= 0 else 0
# predictions pertaining to a specific timestep lie along diagonal
y_hat_t = np.flipud(y_hat_batch[start_idx:t + 1]).diagonal()
if method == 'first':
agg_y_hat_batch = np.append(agg_y_hat_batch, [y_hat_t[0]])
elif method == 'mean':
agg_y_hat_batch = np.append(agg_y_hat_batch, np.mean(y_hat_t))
agg_y_hat_batch = agg_y_hat_batch.reshape(len(agg_y_hat_batch), 1)
self.y_hat = np.append(self.y_hat, agg_y_hat_batch)
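    # Aggregation example (illustrative): with n_predictions=10, a timestep can accumulate up to 10
    # overlapping predictions from the current and earlier rows of the batch; "first" keeps a single
    # representative and "mean" averages them, so self.y_hat ends up with one value per timestep.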
def batch_predict(self, channel):
"""
Used trained LSTM model to predict test data arriving in batches.
Args:
channel (obj): Channel class object containing train/test data
for X,y for a single channel
Returns:
channel (obj): Channel class object with y_hat values as attribute
"""
if not channel.bad_data:
num_batches = int((channel.y_test.shape[0] - self.config.l_s)
/ self.config.batch_size)
if num_batches < 0:
raise ValueError('l_s ({}) too large for stream length {}.'
.format(self.config.l_s, channel.y_test.shape[0]))
# simulate data arriving in batches, predict each batch
for i in range(0, num_batches + 1):
prior_idx = i * self.config.batch_size
idx = (i + 1) * self.config.batch_size
if i + 1 == num_batches + 1:
# remaining values won't necessarily equal batch size
idx = channel.y_test.shape[0]
X_test_batch = channel.X_test[prior_idx:idx]
y_hat_batch = self.model.predict(X_test_batch)
self.aggregate_predictions(y_hat_batch)
self.y_hat = np.reshape(self.y_hat, (self.y_hat.size,))
channel.y_hat = self.y_hat
return channel
# --------------------------------------- DETECTOR ------------------------
class Detector:
def __init__(self, labels_path=None, result_path='results/',
config_path='telemanom.yaml', input_metamodel=None, train_model7=True, observations_for_inference=None,
scaling_factors_for_inference_dic=None, training_datapoints=None):
"""
Top-level class for running anomaly detection over a group of channels
Also evaluates performance against a set of labels if provided.
Args:
labels_path (str): path to .csv containing labeled anomaly ranges
for group of channels to be processed
result_path (str): directory indicating where to stick result .csv
config_path (str): path to config.yaml
Attributes:
labels_path (str): see Args
results (list of dicts): holds dicts of results for each channel
result_df (dataframe): results converted to pandas dataframe
chan_df (dataframe): holds all channel information from labels .csv
result_tracker (dict): if labels provided, holds results throughout
processing for logging
config (obj): Channel class object containing train/test data
for X,y for a single channel
y_hat (arr): predicted channel values
id (str): datetime id for tracking different runs
result_path (str): see Args
"""
self.input_metamodel = input_metamodel
self.models_dic = None
self.scales_dic = None
self.labels_path = labels_path
self.results = []
self.result_df = None
self.raw_errors_dic = None
self.chan_df = None
self.observations_for_inference = observations_for_inference
self.training_datapoints = training_datapoints
self.scaling_factors_for_inference_dic = scaling_factors_for_inference_dic
self.result_tracker = {
'true_positives': 0,
'false_positives': 0,
'false_negatives': 0
}
self.config = Config(config_path)
self.y_hat = None
self.config.train = train_model7
self.config.predict = not train_model7
if not self.config.predict and self.config.use_id:
self.id = self.config.use_id
else:
self.id = dt.now().strftime('%Y-%m-%d_%H.%M.%S')
self.result_path = result_path
if self.labels_path:
self.chan_df = pd.read_csv(labels_path)
else:
chan_ids = launch_utils.read_configs()["data_channels"]
self.chan_df = | pd.DataFrame({"chan_id": chan_ids}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
        # Only categoricals with the same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
        # Only categoricals with the same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
        # comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
        # Make sure that unequal comparisons take the categories order into
        # account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
        # the unicode option should not affect Categorical, as it doesn't care
        # about the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
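# as_ordered/as_unordered/set_ordered toggle the ordered flag, either in place or on a returned copy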
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
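# set_categories can reorder, drop, or add categories; values keep their meaning and codes are remapped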
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
# all "pointers" to '4' must be changed from 3 to 0, ...
c = c.set_categories([4, 3, 2, 1])
# positions are changed
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3]))
# categories are now in new order
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1]))
# output is the same
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1]))
# with the reversed category ordering, 4 is now the minimum and 1 the maximum
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
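# rename_categories maps old categories to new ones positionally; the replacement list must have the same length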
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
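# reorder_categories only permutes the existing categories; any mismatch with the old set raises ValueError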
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
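# add_categories appends new, unused categories; adding one that already exists raises ValueError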
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
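# remove_categories drops the given categories and turns matching values into NaN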
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removing a category that is not in categories should raise
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
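# isnull flags missing values whether they are stored as code -1 or as an explicit NaN category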
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# when ordered=False, the result's categories follow order of appearance
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
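# mode returns the most frequent categories as a Categorical with the same categories; NaN never counts as the mode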
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
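# scalar indexing returns the value itself, while slicing returns a Categorical with the original categories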
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
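# nbytes should equal the size of the codes array plus the size of the categories' values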
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical returns an np.array, like pd.Series does, which differs
# from np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
# eggs after donuts, after switching milk and donuts
exp = np.array([3, 5])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following: comparisons with scalars not in categories should raise
# for ordering comparisons (<, >), but not for == / !=
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
# doing this breaks transform
x['person_name'] = pd.Categorical(x.person_name)
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
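# astype('category') should produce the same result as wrapping the values in Categorical, including the categories/ordered keywords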
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to a non-overlapping index yields all-NaN values
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in codes
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
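# the .cat accessor exposes categories/ordered and the category-mutating methods on a categorical Series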
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# Calling set_categories directly on the Series (instead of via .cat) is
# an easy mistake, so test that it raises an error on wrong inputs:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
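# Series repr should include the category dtype line and a (possibly truncated) list of categories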
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
# test all lengths
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
self.assertEqual(repr(df), exp)
def test_info(self):
# make sure it works
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category'] == 'd']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
def test_mode(self):
s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5, 1], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
s = pd.Series(pd.Categorical(
["a", "b", "c", "c", "c", "b"], categories=["c", "a", "b", "d"]))
res = s.value_counts(sort=False)
exp = Series([3, 1, 2, 0],
index=pd.CategoricalIndex(["c", "a", "b", "d"]))
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp = Series([3, 2, 1, 0],
index=pd.CategoricalIndex(["c", "b", "a", "d"]))
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# https://github.com/pydata/pandas/issues/9443
s = pd.Series(["a", "b", "a"], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
tm.assert_series_equal(
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", "a"], categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1, 0],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", None, "a", None, None], categories=["a", "b", np.nan
]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1],
index=pd.CategoricalIndex([np.nan, "a", "b"])))
def test_groupby(self):
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"
], categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
expected = DataFrame({'a': Series(
[1, 2, 4, np.nan], index=pd.CategoricalIndex(
['a', 'b', 'c', 'd'], name='b'))})
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A")
exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A')
expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers
gb = df.groupby(['A', 'B'])
expected = DataFrame({'values': Series(
[1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan
], index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']], names=['A', 'B']))})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers with a non-cat
df = df.copy()
df['C'] = ['foo', 'bar'] * 2
gb = df.groupby(['A', 'B', 'C'])
expected = DataFrame({'values': Series(
np.nan, index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y'], ['foo', 'bar']
], names=['A', 'B', 'C']))}).sortlevel()
expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name)
g = x.groupby(['person_id'])
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
# Filter
tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
# GH 9603
df = pd.DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4])
result = df.groupby(c).apply(len)
expected = pd.Series([1, 0, 0, 0],
index=pd.CategoricalIndex(c.values.categories))
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
def test_pivot_table(self):
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'])
expected = Series([1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']],
names=['A', 'B']),
name='values')
tm.assert_series_equal(result, expected)
def test_count(self):
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
self.assertEqual(result, 2)
def test_sort(self):
c = Categorical(["a", "b", "b", "a"], ordered=False)
cat = Series(c)
# 9816 deprecated
with tm.assert_produces_warning(FutureWarning):
c.order()
# sort in the categories order
expected = Series(
Categorical(["a", "a", "b", "b"],
ordered=False), index=[0, 3, 1, 2])
result = cat.sort_values()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a", "c", "b", "d"], categories=[
"a", "b", "c", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res.__array__(), exp)
raw_cat1 = Categorical(["a", "b", "c", "d"],
categories=["a", "b", "c", "d"], ordered=False)
raw_cat2 = Categorical(["a", "b", "c", "d"],
categories=["d", "c", "b", "a"], ordered=True)
s = ["a", "b", "c", "d"]
df = DataFrame({"unsort": raw_cat1,
"sort": raw_cat2,
"string": s,
"values": [1, 2, 3, 4]})
# Cats must be sorted in a dataframe
res = df.sort_values(by=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
res = df.sort_values(by=["sort"], ascending=False)
exp = df.sort_values(by=["string"], ascending=True)
self.assert_numpy_array_equal(res["values"], exp["values"])
self.assertEqual(res["sort"].dtype, "category")
self.assertEqual(res["unsort"].dtype, "category")
# unordered cat, but we allow this
df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
result = df.sort_values(by=['grade'])
expected = df.iloc[[1, 2, 5, 0, 3, 4]]
tm.assert_frame_equal(result, expected)
# multi
result = df.sort_values(by=['grade', 'id'])
expected = df.iloc[[2, 1, 5, 4, 3, 0]]
tm.assert_frame_equal(result, expected)
# reverse
cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
# some NaN positions
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
def test_slicing(self):
cat = Series(Categorical([1, 2, 3, 4]))
reversed = cat[::-1]
exp = np.array([4, 3, 2, 1])
self.assert_numpy_array_equal(reversed.__array__(), exp)
df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
expected = Series([11, '(0, 25]'], index=['value', 'D'], name=10)
result = df.iloc[10]
tm.assert_series_equal(result, expected)
expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
index=np.arange(10, 20).astype('int64'))
expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
result = df.iloc[10:20]
tm.assert_frame_equal(result, expected)
expected = Series([9, '(0, 25]'], index=['value', 'D'], name=8)
result = df.loc[8]
tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
# systematically test the slicing operations:
# for all slicing ops:
# - returning a dataframe
# - returning a column
# - returning a row
# - returning a single value
cats = pd.Categorical(
["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 2, 3, 4, 5, 6, 7]
df = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
cats2 = pd.Categorical(["b", "c"], categories=["a", "b", "c"])
idx2 = pd.Index(["j", "k"])
values2 = [3, 4]
# 2:4,: | "j":"k",:
exp_df = pd.DataFrame({"cats": cats2, "values": values2}, index=idx2)
# :,"cats" | :,0
exp_col = pd.Series(cats, index=idx, name='cats')
# "j",: | 2,:
exp_row = pd.Series(["b", 3], index=["cats", "values"], dtype="object",
name="j")
# "j","cats | 2,0
exp_val = "b"
# iloc
# frame
res_df = df.iloc[2:4, :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.iloc[2, :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.iloc[2, 0]
self.assertEqual(res_val, exp_val)
# loc
# frame
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.loc["j", "cats"]
self.assertEqual(res_val, exp_val)
# ix
# frame
# res_df = df.ix["j":"k",[0,1]] # doesn't work?
res_df = df.ix["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.ix["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.ix[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.ix["j", 0]
self.assertEqual(res_val, exp_val)
# iat
res_val = df.iat[2, 0]
self.assertEqual(res_val, exp_val)
# at
res_val = df.at["j", "cats"]
self.assertEqual(res_val, exp_val)
# fancy indexing
exp_fancy = df.iloc[[2]]
res_fancy = df[df["cats"] == "b"]
tm.assert_frame_equal(res_fancy, exp_fancy)
res_fancy = df[df["values"] == 3]
tm.assert_frame_equal(res_fancy, exp_fancy)
# get_value
res_val = df.get_value("j", "cats")
self.assertEqual(res_val, exp_val)
# i : int, slice, or sequence of integers
res_row = df.iloc[2]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
res_df = df.iloc[slice(2, 4)]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[[2, 3]]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
res_df = df.iloc[:, slice(0, 2)]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[:, [0, 1]]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
def test_slicing_doc_examples(self):
# GH 7918
cats = Categorical(
["a", "b", "b", "b", "c", "c", "c"], categories=["a", "b", "c"])
idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
values = [1, 2, 2, 2, 3, 4, 5]
df = DataFrame({"cats": cats, "values": values}, index=idx)
result = df.iloc[2:4, :]
expected = DataFrame(
{"cats": Categorical(
['b', 'b'], categories=['a', 'b', 'c']),
"values": [2, 2]}, index=['j', 'k'])
tm.assert_frame_equal(result, expected)
result = df.iloc[2:4, :].dtypes
expected = Series(['category', 'int64'], ['cats', 'values'])
tm.assert_series_equal(result, expected)
result = df.loc["h":"j", "cats"]
expected = Series(Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c']),
index=['h', 'i', 'j'], name='cats')
tm.assert_series_equal(result, expected)
result = df.ix["h":"j", 0:1]
expected = DataFrame({'cats': Series(
Categorical(
['a', 'b', 'b'], categories=['a', 'b', 'c']), index=['h', 'i',
'j'])})
tm.assert_frame_equal(result, expected)
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
cats = pd.Categorical(
["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
# changed single row
cats1 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx1 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = pd.DataFrame(
{"cats": cats1,
"values": values1}, index=idx1)
# changed multiple rows
cats2 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx2 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = pd.DataFrame(
{"cats": cats2,
"values": values2}, index=idx2)
# changed part of the cats column
cats3 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = pd.DataFrame(
{"cats": cats3,
"values": values3}, index=idx3)
# changed single value in cats col
cats4 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = pd.DataFrame(
{"cats": cats4,
"values": values4}, index=idx4)
# iloc
# ###############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iloc[2, 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.iloc[2, :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.iloc[2:4, :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.iloc[2:4, 0] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.loc["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.loc["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.loc["j":"k", "cats"] = ["c", "c"]
# ix
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.ix["j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.ix[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.ix["j", 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.ix["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.ix["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.ix["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.ix["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.ix["j":"k", 0] = ["c", "c"]
# iat
df = orig.copy()
df.iat[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iat[2, 0] = "c"
self.assertRaises(ValueError, f)
# at
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.at["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# fancy indexing
catsf = pd.Categorical(
["a", "a", "c", "c", "a", "a", "a"], categories=["a", "b", "c"])
idxf = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
valuesf = [1, 1, 3, 3, 1, 1, 1]
df = pd.DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True)
df[df["cats"] == "c"] = ["b", 2]
tm.assert_frame_equal(df, exp_multi_row)
# set_value
df = orig.copy()
df.set_value("j", "cats", "b")
tm.assert_frame_equal(df, exp_single_cats_value)
def f():
df = orig.copy()
df.set_value("j", "cats", "c")
self.assertRaises(ValueError, f)
# Assigning a Category to parts of an int/... column uses the values of
# the Categorical
df = pd.DataFrame({"a": [1, 1, 1, 1, 1],
"b": ["a", "a", "a", "a", "a"]})
exp = pd.DataFrame({"a": [1, "b", "b", 1, 1],
"b": ["a", "a", "b", "b", "a"]})
df.loc[1:2, "a"] = pd.Categorical(["b", "b"], categories=["a", "b"])
df.loc[2:3, "b"] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp)
# Series
orig = Series(pd.Categorical(["b", "b"], categories=["a", "b"]))
s = orig.copy()
s[:] = "a"
exp = Series(pd.Categorical(["a", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[1] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[s.index > 0] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[[False, True]] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s.index = ["x", "y"]
s["y"] = "a"
exp = Series(
pd.Categorical(["b", "a"],
categories=["a", "b"]), index=["x", "y"])
tm.assert_series_equal(s, exp)
# ensure that one can set something to np.nan
s = Series(Categorical([1, 2, 3]))
exp = Series(Categorical([1, np.nan, 3]))
s[1] = np.nan
tm.assert_series_equal(s, exp)
def test_comparisons(self):
tests_data = [(list("abc"), list("cba"), list("bbb")),
([1, 2, 3], [3, 2, 1], [2, 2, 2])]
for data, reverse, base in tests_data:
cat_rev = pd.Series(pd.Categorical(data, categories=reverse,
ordered=True))
cat_rev_base = pd.Series(pd.Categorical(base, categories=reverse,
ordered=True))
cat = pd.Series(pd.Categorical(data, ordered=True))
cat_base = pd.Series(pd.Categorical(
base, categories=cat.cat.categories, ordered=True))
s = Series(base)
a = np.array(base)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = Series([True, False, False])
tm.assert_series_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = Series([False, False, True])
tm.assert_series_equal(res_rev, exp_rev)
res = cat > cat_base
exp = Series([False, False, True])
tm.assert_series_equal(res, exp)
scalar = base[1]
res = cat > scalar
exp = Series([False, False, True])
exp2 = cat.values > scalar
tm.assert_series_equal(res, exp)
tm.assert_numpy_array_equal(res.values, exp2)
res_rev = cat_rev > scalar
exp_rev = Series([True, False, False])
exp_rev2 = cat_rev.values > scalar
tm.assert_series_equal(res_rev, exp_rev)
tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
# categorical cannot be compared to Series or numpy array, and also
# not the other way around
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
def f():
cat > "b"
self.assertRaises(TypeError, f)
cat = Series(Categorical(list("abc"), ordered=False))
def f():
cat > "b"
self.assertRaises(TypeError, f)
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
self.assertRaises(TypeError, lambda: cat < "d")
self.assertRaises(TypeError, lambda: cat > "d")
self.assertRaises(TypeError, lambda: "d" < cat)
self.assertRaises(TypeError, lambda: "d" > cat)
self.assert_series_equal(cat == "d", Series([False, False, False]))
self.assert_series_equal(cat != "d", Series([True, True, True]))
# And test NaN handling...
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
res = (cat == cat)
tm.assert_series_equal(res, exp)
def test_cat_equality(self):
# GH 8938
# allow equality comparisons
a = Series(list('abc'), dtype="category")
b = Series(list('abc'), dtype="object")
c = Series(['a', 'b', 'cc'], dtype="object")
d = Series(list('acb'), dtype="object")
e = Categorical(list('abc'))
f = Categorical(list('acb'))
# vs scalar
self.assertFalse((a == 'a').all())
self.assertTrue(((a != 'a') == ~(a == 'a')).all())
self.assertFalse(('a' == a).all())
self.assertTrue((a == 'a')[0])
self.assertTrue(('a' == a)[0])
self.assertFalse(('a' != a)[0])
# vs list-like
self.assertTrue((a == a).all())
self.assertFalse((a != a).all())
self.assertTrue((a == list(a)).all())
self.assertTrue((a == b).all())
self.assertTrue((b == a).all())
self.assertTrue(((~(a == b)) == (a != b)).all())
self.assertTrue(((~(b == a)) == (b != a)).all())
self.assertFalse((a == c).all())
self.assertFalse((c == a).all())
self.assertFalse((a == d).all())
self.assertFalse((d == a).all())
# vs a cat-like
self.assertTrue((a == e).all())
self.assertTrue((e == a).all())
self.assertFalse((a == f).all())
self.assertFalse((f == a).all())
self.assertTrue(((~(a == e) == (a != e)).all()))
self.assertTrue(((~(e == a) == (e != a)).all()))
self.assertTrue(((~(a == f) == (a != f)).all()))
self.assertTrue(((~(f == a) == (f != a)).all()))
# non-equality is not comparable
self.assertRaises(TypeError, lambda: a < b)
self.assertRaises(TypeError, lambda: b < a)
self.assertRaises(TypeError, lambda: a > b)
self.assertRaises(TypeError, lambda: b > a)
def test_concat(self):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = pd.DataFrame({"cats": cat2,
"vals": vals2}, index=pd.Index([0, 1, 0, 1]))
res = pd.concat([df, df])
tm.assert_frame_equal(exp, res)
# Concat should raise if the two categoricals do not have the same
# categories
cat3 = pd.Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_wrong_categories = pd.DataFrame({"cats": cat3, "vals": vals3})
def f():
pd.concat([df, df_wrong_categories])
self.assertRaises(ValueError, f)
# GH 7864
# make sure ordering is preserved
df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"])
df['grade'].cat.set_categories(['e', 'a', 'b'])
df1 = df[0:3]
df2 = df[3:]
self.assert_numpy_array_equal(df['grade'].cat.categories,
df1['grade'].cat.categories)
self.assert_numpy_array_equal(df['grade'].cat.categories,
df2['grade'].cat.categories)
dfx = pd.concat([df1, df2])
dfx['grade'].cat.categories
self.assert_numpy_array_equal(df['grade'].cat.categories,
dfx['grade'].cat.categories)
def test_concat_preserve(self):
# GH 8641
# series concat not preserving category dtype
s = Series(list('abc'), dtype='category')
s2 = Series(list('abd'), dtype='category')
def f():
pd.concat([s, s2])
self.assertRaises(ValueError, f)
result = pd.concat([s, s], ignore_index=True)
expected = Series(list('abcabc')).astype('category')
tm.assert_series_equal(result, expected)
result = pd.concat([s, s])
expected = Series(
list('abcabc'), index=[0, 1, 2, 0, 1, 2]).astype('category')
tm.assert_series_equal(result, expected)
a = Series(np.arange(6, dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A': a,
'B': b.astype('category', categories=list('cab'))})
result = pd.concat([df2, df2])
expected = DataFrame({'A': pd.concat([a, a]),
'B': pd.concat([b, b]).astype(
'category', categories=list('cab'))})
tm.assert_frame_equal(result, expected)
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A': a,
'B': b.astype('category', categories=list(
'cab'))}).set_index('B')
result = pd.concat([df2, df2])
expected = DataFrame({'A': | pd.concat([a, a]) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 23 12:30:52 2021
@author: zzhang
"""
import pandas as pd
import numpy as np
import yaml
from schimpy.schism_setup import create_schism_setup
def read_source_sink_yaml(yaml_fn):
"""
Parameters
----------
yaml_fn : STR
source_sink.yaml filename.
Returns
-------
df_sources : PANDAS dataframe
For sources
df_sinks : PANDAS dataframe
For sinks
"""
with open(yaml_fn, 'r') as file:
data = yaml.safe_load(file)
if 'sources' in data.keys():
df_sources = pd.DataFrame.from_dict(data['sources'],orient='index',
columns=['x','y'])
else:
df_sources = | pd.DataFrame() | pandas.DataFrame |
#! /usr/bin/env python3
"""Functions for Sensitivity Analysis
Contains wrapper functions for the SALib library and functions
to assess the convergence of the sensitivity indices.
"""
import os as os
import numpy as np
import pandas as pd
from itertools import combinations
from SALib.analyze import sobol
def format_saltelli(Y: np.ndarray, num_ma: int, len_ma: int) -> np.ndarray:
"""Convert matrix format to saltelli format
Reshapes the output array generated by the model to fit the required
shape of the input for the sobol.analyze function of the SALib
module.
Assumes the shape of the model output is (num_par + 2) x num_samples.
Parameters
----------
Y
Vector of model output
num_ma
Number of matrices (number of parameters + 2)
len_ma
Number of samples in one matrix
Returns
-------
Y_saltelli
Reshaped output vector
"""
Y_saltelli = np.roll(Y, len(Y) - 1, axis = 0)
Y_saltelli = np.reshape(Y_saltelli, num_ma * len_ma, order = 'F')
return Y_saltelli
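# Illustrative sketch of the reshaping (made-up shapes): with 2 parameters the
# output holds num_ma = num_par + 2 = 4 matrices, e.g.
#     Y = np.arange(12.0).reshape(4, 3)                  # (num_ma, len_ma)
#     Y_flat = format_saltelli(Y, num_ma=4, len_ma=3)    # 1-D vector, length 12
# The first row (matrix) is moved to the end via np.roll and the array is then
# flattened column-wise (Fortran order), the layout expected by sobol.analyze.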
def calc_sobol_indices(Y, problem, print_out=False,
keep=False, resamples=1000, cast_negative=False):
"""Calculate Sobol sensitivity indices for model output
Uses the SALib module to calculate the first-order and total sobol
indices. Optionally, the bootstrap samples can be stored to compute
other statistics that require bootstrapping. Returns the sensitivity
indices as a pandas dataframe. Second-order indices cannot be computed
with the current format of the input array.
Parameters
----------
Y
np.ndarray, array of model output with shape (num_par+2, num_samples)
problem
dict, problem dictionary as required by the SALib module
print_out
bool, optional, if True the sensitivity indices are printed to
the console
keep
bool, optional, if True also returns the bootstrap samples
resamples
int, optional, change the number of resamples for bootstrapping
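cast_negative
bool, optional, if True any negative index estimates are set to zero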
Output
------
si_df
pandas dataframe of the computed indices with confidence intervals
si_resamples
optional, if keep is True a pandas dataframe of the bootstrap
samples is also returned
"""
D, N = Y.shape
Y_format = format_saltelli(Y, D, N)
Si = sobol.analyze(problem, Y_format, calc_second_order = False,
num_resamples = resamples,
print_to_console = print_out,
keep_resamples=keep,
)
if not keep:
si_df = pd.DataFrame.from_dict(Si).set_index([problem['names']])
if cast_negative:
si_df[si_df < 0] = 0
return si_df
else:
si_names = ['S1', 'S1_conf', 'ST', 'ST_conf']
si_dict = {key: Si[key] for key in si_names}
si_df = pd.DataFrame.from_dict(si_dict).set_index([problem['names']])
st_all = pd.DataFrame(Si['ST_conf_all'].T, index=problem['names'])
s1_all = pd.DataFrame(Si['S1_conf_all'].T, index=problem['names'])
si_resamples = pd.concat([s1_all, st_all], axis=1, keys=['S1', 'ST'])
if cast_negative:
si_df[si_df < 0] = 0
si_resamples[si_resamples < 0] = 0
return si_df, si_resamples
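# Minimal usage sketch for calc_sobol_indices (hypothetical problem definition
# and random output, not taken from any real study):
#     problem = {"num_vars": 3,
#                "names": ["k1", "k2", "k3"],
#                "bounds": [[0.0, 1.0]] * 3}
#     Y = np.random.rand(problem["num_vars"] + 2, 1000)   # (num_par + 2, N)
#     si_df = calc_sobol_indices(Y, problem)
#     si_df[["S1", "ST"]]    # first-order and total indices per parameter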
def summary_statistic(df, label):
"""Compute a summary statistic to check convergence
The width of the 95% confidence interval is used as a
summary statistic to evaluate the convergence of the
sensitivity index values.
"""
width = df.loc[:, label] * 2
stat = np.max(width)
return stat
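# Illustrative call: summary_statistic(si_df, "ST_conf") returns the widest
# 95% confidence interval (2 * ST_conf) across all parameters.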
def screening_convergence(df, idx, idx_conf, threshold):
"""Calculate convergence of low impact indices"""
low_impact = df.loc[df[idx] < threshold]
stat = np.max(list(low_impact[idx_conf] * 2))
return stat
def rank_index(data, by):
"""Rank parameters in dataframe by a column
Sorts the parameter names of the dataframe by the
values of a specified column and assigns an integer
rank to each parameter. A higher assigned value
indicates a higher rank and greater importance.
Parameters
----------
data
pd.DataFrame, contains the sensitivity indices
parameters are listed in the rows and different
indices in the columns
by
str, name of a column containing the sensitivity
index by which to rank the parameters
if by == "all" iterate over all columns of the
dataframe and calculate rankings for each
Output
------
ranked
pd.Series or pd.DataFrame of the rank of each parameter
"""
if by == 'all':
rank_list = []
for n, it in data.items():
sorted_col = it.sort_values()
names = sorted_col.index
ranks = np.arange(1, len(names) + 1)
rank_series = pd.Series(ranks, index=names, name=n)
rank_list.append(rank_series)
ranked = | pd.concat(rank_list, axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if it's a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
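# e.g. self._construct(shape=4, value=9, dtype=np.int64) builds a Series of
# length 4 (or a 4x4 DataFrame / 4x4x4 Panel) filled with the value 9.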
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# _get_numeric_data includes _get_bool_data, so can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
# numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
for op in ['min', 'max', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',
'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise NotImplementedError
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
# (currently there is a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
# Check for stability when receives seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
self.assertTrue(len(o.sample(n=4)) == 4)
self.assertTrue(len(o.sample(frac=0.34)) == 3)
self.assertTrue(len(o.sample(frac=0.36)) == 4)
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with tm.assertRaises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
# Weights have empty values to be filled with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with tm.assertRaises(ValueError):
df.sample(1, weights=s4)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10 ** len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o, 5)) == 5)
self.assertTrue(len(np.array_split(o, 2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
class TestSeries(tm.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setUp(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
self.assertTrue(s.bool())
s = Series([False])
self.assertFalse(s.bool())
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
self.assertRaises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
self.assertRaises(ValueError, lambda: s.bool())
# multiple bool are still an error
for s in [Series([True, True]), Series([False, False])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = 'foo'
o2 = Series(range(3), range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101', periods=1000, freq='s'),
name='foo')
result = ts.resample('1T').mean()
self.check_metadata(ts, result)
result = ts.resample('1T').min()
self.check_metadata(ts, result)
result = ts.resample('1T').apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name', 'filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
self.assertEqual(result.filename, 'foo+bar')
self.assertIsNone(result.name)
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_numpy_array_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_numpy_array_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interp_regression(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
import pandas as pd
import folium
import os
import json
from geopy.geocoders import Nominatim, GoogleV3
import argparse
def getAll(dd, tahun):
idx = len(dd)
for n in nama_kelurahan:
if n not in [loc for loc in dd['lokasi']]:
dd.loc[idx] = [tahun, n, 0]
idx += 1
return dd.reset_index().iloc[:, 1:]
with open('../Data/json/kelurahan_jakarta.json') as f:
dataa = json.load(f)
nama_kelurahan = [dt['properties']['KEL_NAME'] for dt in dataa['features']]
data_ekstraksi = pd.read_csv("../Data/data/Data Gabungan Bersih.csv",
index_col=0).drop_duplicates().reset_index().iloc[:, 1:]
# Get the longitude and latitude coordinates
locator = Nominatim(user_agent="myGeocoder")
alamat = []
lokasi = []
longlat = []
no = -1
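# getkeyword (below) expands names that contain an alias in parentheses, e.g.
# "Desa (Alias)", into every "village, district" query-string combination so the
# geocoder can be tried with each variant until one resolves.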
def getkeyword(desa, kec):
if desa.__contains__("("):
key1 = [d.replace(")", "") for d in desa.split(" (")]
else:
key1 = [desa]
if kec.__contains__("("):
key2 = [d.replace(")", "") for d in kec.split(" (")]
else:
key2 = [kec]
keyword = [des+", "+kecmtn for des in key1 for kecmtn in key2]
return keyword
for idx, row in data_ekstraksi.lokasi.drop_duplicates().to_frame().reset_index().iterrows():
if row['lokasi'] != "Jakarta":
keyword = getkeyword(row['lokasi'], "Jakarta")
for k in keyword:
location = locator.geocode(k)
# no+=1
print(location, "|", k)
if location:
alamat.append(location[0])
longlat.append(location[1])
lokasi.append(row['lokasi'])
df = pd.DataFrame({"lokasi": lokasi, "alamat": alamat, "longlat": longlat})
from collections import deque
from copy import deepcopy
from warnings import warn
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from extrapolation import difference_matrix
def deque_from_tensors(xs, device, **kwargs):
return deque([x.to(device) for x in xs], **kwargs)
class ExperimentBase:
def __init__(self, seq, f, values=None, device="cpu"):
self.seq = seq
if values is None:
self.values = [f(x).item() for x in seq]
else:
self.values = values
self.f = f
self.device = device
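# Per-method bookkeeping dictionaries, keyed by extrapolation-method name:
# k and stride control how _get_x_axis places the extrapolated points on the
# x-axis, logs stores the extrapolated iterates and value_logs their objective
# values.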
self.k = {}
self.stride = {}
self.logs = {}
self.value_logs = {}
def _get_x_axis(self, method, n=None):
k = self.k[method]
stride = self.stride.get(method, 1)
if n is None:
n = len(self.logs[method])
else:
n //= stride
m = k + 2 + (n - 1) * stride
x = np.arange(k + 1, m, stride)
assert len(x) == n, f"{len(x)} != {n}"
return x
def plot_values(self, methods=None, n=None, ax=None):
if methods is None:
methods = self.logs.keys()
if ax is None:
ax = plt.gca()
if n is None:
n = len(self.values)
ax.plot(np.arange(n), self.values[:n], label="Original", alpha=0.8)
for m in methods:
x = self._get_x_axis(m, n)
ax.plot(x, self.value_logs[m][:len(x)], label=m, alpha=0.8)
def plot_log_diff(self, methods=None, n=None, compare_to="best", ax=None):
best = self.values[-1]
if compare_to == "best":
for s in self.value_logs.values():
if s[-1] < best:
best = s[-1]
if methods is None:
methods = self.logs.keys()
if ax is None:
ax = plt.gca()
if n is None:
n = len(self.values)
ax.plot(np.arange(n), np.log10(np.abs(np.array(self.values[:n]) - best)), label="Original", alpha=0.8)
for m in methods:
x = self._get_x_axis(m, n)
ax.plot(x,
np.log10(np.abs(np.array(self.value_logs[m][:len(x)]) - best)),
label=m,
alpha=0.8)
@property
def best_x(self):
best = self.values[-1]
best_x = self.seq[-1]
for k in self.value_logs.keys():
idx = np.argmin(self.value_logs[k][-10:])
if self.value_logs[k][-10:][idx] < best:
best = self.value_logs[k][-10:][idx]
best_x = self.logs[k][-10:][idx]
return best_x
def save(self, path):
d = {
"seq": self.seq,
"values": self.values,
"logs": self.logs,
"value_logs": self.value_logs,
"k": self.k,
}
torch.save(d, path)
def load(self, path):
d = torch.load(path)
self.seq = d["seq"]
self.values = d["values"]
self.logs = d["logs"]
self.value_logs = d["value_logs"]
self.k = d["k"]
def value_df(self):
s = {"Original": pd.Series(self.values)}
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
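# The two stubs above are the extension points: a concrete test class mixes
# ParserTests into tm.TestCase and overrides read_csv/read_table to pin the
# engine under test. A minimal sketch (an assumption, not part of this excerpt;
# the real concrete classes may set additional keyword arguments):
#
#   class TestCParser(ParserTests, tm.TestCase):
#       def read_csv(self, *args, **kwds):
#           kwds = kwds.copy()
#           kwds['engine'] = 'c'
#           return read_csv(*args, **kwds)
#
#       def read_table(self, *args, **kwds):
#           kwds = kwds.copy()
#           kwds['engine'] = 'c'
#           return read_table(*args, **kwds)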
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
import numpy as np
import pandas as pd
import warnings
def sigmoid(w,x,noise=0.):
return 1./(1+np.exp(-w.dot(x.T)-noise))
def generate_synthetic_data(ndata_per_action: int, n_actions: int, actions_rewards: np.array,
continuos_prms:dict, ordinal_prms: dict, categorical_prms:dict,
rstate: int, noise_scale:float,
return_inputs=False,output_info=False):
"""
Generate synthetic data suitable for contextual multi-armed bandit analysis.
Given a number of actions, and the global expected reward for each action, it builds a collection of datasets with a certain number of input features representing the context,
and a binary target representing the reward.
For each action, the reward is generated in the following way:
* a random vector of weights is multiplied by the input features, and the result is then fed into a sigmoid to produce probabilities of success
* a reward (target=1) is then assigned according to the values given by the user (e.g. if the action reward is 10%, then all the data with probabilities belonging to the last decile are labeled as 1)
Once labeling has been done for each action, the data are concatenated together. An integer tag is added to specify which action has been played for each row.
The method can produce a dataset where the inputs are
* continuous (normally distributed)
* ordinal
* categorical
A certain level of noise can be introduced by the user to make the data more or less easy to fit with machine learning methods.
Parameters
==========
ndata_per_action: int
Number of times each action is played
n_actions: int
Number of actions
actions_rewards: np.array
Expected global reward for each action
continuos_prms:dict
Dictionary containing details to generate continuous features. It has the form
{'feature_name_1':{'loc':mu_1, 'scale':sigma_1},
'feature_name_2':{'loc':mu_2, 'scale':sigma_2},
...}
where 'loc' is the mean of the normal distribution, and 'scale' its standard deviation.
ordinal_prms: dict
Dictionary containing details to generate ordinal features. It has the form
{'feature_name_1':{'start':start_1,'stop':stop_1,'weights':[x1,x2,...]},
'feature_name_2':{'start':start_2,'stop':stop_2,'weights':[y1,y2,...]},
...}
where 'start' and 'stop' are the end points of the range of values and 'weights' the frequency of each value belonging to the interval (the sum of them has to be 1).
categorical_prms:dict
Dictionary containing details to generate categorical features. It has the form
{'feature_name_1':{'levels':[l_1,l_2...,l_k],'weights':[x1,x2,...,x_k]},
'feature_name_2':{'levels':[d_1,d_2...,d_k],'weights':[y1,y2,...,y_k]},
...}
where 'levels' is the list of categorical levels and 'weights' the frequency of each level (the sum of them has to be 1).
rstate: int
random seed
noise_scale:float
Standard deviation of the noise term (which is normally distributed around 0)
return_inputs=False
When True, only the context is returned
output_info=False
When True, generic information about the output dataset is provided to the user
Returns
=======
input_data: pd.DataFrame
Contextual dataset
"""
n_continuos = len(continuos_prms)
n_ordinals = len(ordinal_prms)
n_categoricals = len(categorical_prms)
tot_data = ndata_per_action*n_actions
input_data = pd.DataFrame()
np.random.seed(rstate)
# continuos data
for feature_name, sub_dict in continuos_prms.items():
input_data[feature_name] = np.random.normal(loc=sub_dict['loc'], scale=sub_dict['scale'], size=tot_data)
# ordinal
for feature_name, sub_dict in ordinal_prms.items():
input_data[feature_name] = np.random.choice(np.arange(sub_dict['start'], sub_dict['stop']),p=sub_dict['weights'],size=tot_data)
# categorical
for feature_name, sub_dict in categorical_prms.items():
input_data[feature_name] = np.random.choice(sub_dict['levels'],p=sub_dict['weights'],size=tot_data)
if return_inputs:
return input_data
#building the target features
rewards = np.array([])
action_codes = []
for action_id in range(n_actions):
X = input_data[action_id*ndata_per_action:(action_id+1)*ndata_per_action]
X=pd.get_dummies(X,drop_first=True).values
X = np.hstack((np.ones((ndata_per_action,1)),X))
weights = np.random.normal(size=X.shape[1])
noise = np.random.normal(scale=noise_scale,size=ndata_per_action)
probabilities = sigmoid(weights,X,noise)
cut_point = np.quantile(probabilities,1-actions_rewards[action_id])
mask = probabilities > cut_point
tmp = np.zeros(len(probabilities))
tmp[mask] = 1
rewards = np.hstack((rewards,tmp))
action_codes += [action_id+1]*ndata_per_action
input_data['reward'] = rewards
input_data['reward'] = input_data['reward'].astype(int)
input_data['action_code'] = action_codes
if output_info:
print('Total number of data: {}'.format(tot_data))
actions = list(range(1,n_actions+1))
print('Actions played: {}'.format(actions))
avg_rew = input_data.groupby(by='action_code')[['reward']].mean()
if (avg_rew.values==0.).any():
warnings.warn('No reward has been produced for some of the actions: reduce noise or change the imbalance values')
print('Global expected rewards per action')
display(avg_rew)
return input_data
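# A minimal usage sketch for generate_synthetic_data (not part of the original
# module; the feature names and parameter values below are illustrative
# assumptions only):
#
#   continuous = {'age': {'loc': 40, 'scale': 10}}
#   ordinal = {'n_products': {'start': 1, 'stop': 4, 'weights': [0.5, 0.3, 0.2]}}
#   categorical = {'channel': {'levels': ['web', 'store'], 'weights': [0.7, 0.3]}}
#   data = generate_synthetic_data(ndata_per_action=1000, n_actions=2,
#                                  actions_rewards=np.array([0.10, 0.25]),
#                                  continuos_prms=continuous, ordinal_prms=ordinal,
#                                  categorical_prms=categorical,
#                                  rstate=42, noise_scale=0.5)
#   # 'data' has ndata_per_action * n_actions rows, the context columns, a binary
#   # 'reward' column and an integer 'action_code' column (1-based action id).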
def generate_synthetic_trial_data(ndata_per_action: int, n_actions: int, actions_rewards: np.array,
continuos_prms:dict, ordinal_prms: dict, categorical_prms:dict,
rstate: int, noise_scale:float, weights: list,
return_inputs=False,output_info=False):
"""
Generate synthetic data suitable for simulating a contextual multi-armed bandit experiment.
Given a number of actions, and the global expected reward for each action, it builds a dataset with a certain number of input features representing the context,
and a collection of binary columns representing the reward obtained by playing each action.
The reward is distributed among the available actions in the following way:
* a random vector of weights is multiplied by the input features, and the result is then fed into a sigmoid to produce probabilities of success
* a reward (target=1) is then assigned to the action with the highest probability.
Once labeling has been done, the parameters in 'actions_rewards' are used to reduce the overall average reward of each action.
The method can produce a dataset where the inputs are
* continuous (normally distributed)
* ordinal
* categorical
A certain level of noise can be introduced by the user to make the data more or less easy to fit with machine learning methods.
Parameters
==========
ndata_per_action: int
Number of context rows to generate (each row is one round of the experiment)
n_actions: int
Number of actions
actions_rewards: np.array
Expected global reward for each action
continuos_prms:dict
Dictionary containing details to generate continuous features. It has the form
{'feature_name_1':{'loc':mu_1, 'scale':sigma_1},
'feature_name_2':{'loc':mu_2, 'scale':sigma_2},
...}
where 'loc' is the mean of the normal distribution, and 'scale' its standard deviation.
ordinal_prms: dict
Dictionary containing details to generate ordinal features. It has the form
{'feature_name_1':{'start':start_1,'stop':stop_1,'weights':[x1,x2,...]},
'feature_name_2':{'start':start_2,'stop':stop_2,'weights':[y1,y2,...]},
...}
where 'start' and 'stop' are the end points of the range of values and 'weights' the frequency of each value belonging to the interval (the sum of them has to be 1).
categorical_prms:dict
Dictionary containing details to generate categorical features. It has the form
{'feature_name_1':{'levels':[l_1,l_2...,l_k],'weights':[x1,x2,...,x_k]},
'feature_name_2':{'levels':[d_1,d_2...,d_k],'weights':[y1,y2,...,y_k]},
...}
where 'levels' is the list of categorical levels and 'weights' the frequency of each level (the sum of them has to be 1).
rstate: int
random seed
noise_scale:float
Standard deviation of the noise term (which is normally distributed around 0)
weights: list
One weight vector per action, used inside the sigmoid; each vector needs one entry per dummy-encoded input feature plus one for the intercept term.
return_inputs=False
When True, only the context is returned
output_info=False
When True, generic information about the output dataset is provided to the user
Returns
=======
input_data: pd.DataFrame
Contextual dataset
"""
n_continuos = len(continuos_prms)
n_ordinals = len(ordinal_prms)
n_categoricals = len(categorical_prms)
tot_data = ndata_per_action
input_data = pd.DataFrame()
np.random.seed(rstate)
# continuos data
for feature_name, sub_dict in continuos_prms.items():
input_data[feature_name] = np.random.normal(loc=sub_dict['loc'], scale=sub_dict['scale'], size=tot_data)
# ordinal
for feature_name, sub_dict in ordinal_prms.items():
input_data[feature_name] = np.random.choice(np.arange(sub_dict['start'], sub_dict['stop']),p=sub_dict['weights'],size=tot_data)
# categorical
for feature_name, sub_dict in categorical_prms.items():
input_data[feature_name] = np.random.choice(sub_dict['levels'],p=sub_dict['weights'],size=tot_data)
if return_inputs:
return input_data
#building the target features
action_codes = []
features = list(continuos_prms.keys())+list(ordinal_prms.keys())+list(categorical_prms.keys())
for action_id in range(n_actions):
X=pd.get_dummies(input_data[features],drop_first=True).values
X = np.hstack((np.ones((ndata_per_action,1)),X))
weights_vec = np.array(weights[action_id])
noise = np.random.normal(scale=noise_scale,size=ndata_per_action)
probabilities = sigmoid(weights_vec,X,noise)
input_data['action_prob_{}'.format(action_id+1)] = probabilities
input_data['action_{}_reward'.format(action_id+1)] = 0.
action_probs = ['action_prob_{}'.format(action_id) for action_id in range(1,n_actions+1)]
loc_reward = np.argmax(input_data[action_probs].values,axis=1)
for idx,loc in enumerate(loc_reward):
input_data.loc[idx,'action_{}_reward'.format(loc+1)] = 1.
for action_id in range(n_actions):
probabilities = input_data['action_prob_{}'.format(action_id+1)].values
cut_point = np.quantile(probabilities,1-actions_rewards[action_id])
mask = probabilities <= cut_point
input_data.loc[mask,'action_{}_reward'.format(action_id+1)] = 0.
input_data.drop(columns=action_probs,inplace=True)
return input_data
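# A minimal usage sketch for generate_synthetic_trial_data (not part of the
# original module; names and values are illustrative assumptions). Unlike
# generate_synthetic_data, the caller supplies one weight vector per action and
# the output contains one 'action_<i>_reward' column per action instead of a
# single 'reward' column:
#
#   continuous = {'age': {'loc': 40, 'scale': 10}}
#   # With one continuous feature the design matrix is [intercept, age],
#   # so each per-action weight vector needs two entries.
#   weights = [[0.1, 0.5], [-0.2, 0.3]]
#   trial = generate_synthetic_trial_data(ndata_per_action=1000, n_actions=2,
#                                         actions_rewards=np.array([0.10, 0.25]),
#                                         continuos_prms=continuous, ordinal_prms={},
#                                         categorical_prms={}, rstate=0,
#                                         noise_scale=0.5, weights=weights)
#   # 'trial' contains the 'age' context column plus 'action_1_reward' and
#   # 'action_2_reward' columns.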
def generate_experimental_dataset(sizes,
list_of_class_weights,
list_of_continuos_dicts,
list_of_ordinal_dicts,
list_of_categorical_dicts,
list_of_noise_scales,
list_of_model_weights,
seed,
output_info=False):
list_of_frames = []
g=0
for n,cw,ct_prm,ord_prm,catg_prm,noise_scale,weights in zip(sizes,
list_of_class_weights,
list_of_continuos_dicts,
list_of_ordinal_dicts,
list_of_categorical_dicts,
list_of_noise_scales,
list_of_model_weights):
g+=1
dataset = generate_synthetic_trial_data(n,len(cw),cw,
ct_prm,ord_prm,catg_prm,seed,noise_scale,weights=weights)
if output_info:
print('Group {}'.format(g))
action_cols = ['action_{}_reward'.format(idx) for idx in range(1,len(cw)+1)]
display(dataset[action_cols].mean())
list_of_frames.append(dataset)
final_frame = pd.concat(list_of_frames)
#!/usr/bin/env python
# coding: utf-8
import copy
from openpyxl import load_workbook
from openpyxl import Workbook
from pandas.core.frame import DataFrame
from MyPythonDocx import *
def cal_risk_cnt(page, ips=[]):
values = []
df = DataFrame(page[1:], columns=page[0])
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import assignment2_helper as helper
# Look pretty...
# matplotlib.style.use('ggplot')
plt.style.use('ggplot')
# Do * NOT * alter this line, until instructed!
scaleFeatures = True
#
file_path = "/Users/szabolcs/dev/git/DAT210x/Module4/Datasets/"
file_name = "kidney_disease.csv"
exclude_columns = ['id', 'classification'] #, 'rbc', 'pc', 'pcc', 'ba', 'htn', 'dm', 'cad', 'appet', 'pe', 'ane']
df = pd.read_csv(file_path + file_name)
labels = ['red' if i=='ckd' else 'green' for i in df.classification]
df.drop(exclude_columns, axis=1, inplace=True)
print(df.head())
# One-hot encode all nominal columns in a single call (equivalent to encoding
# them one at a time).
nominal_cols = ["rbc", "pc", "pcc", "ba", "htn", "dm", "cad", "appet", "pe", "ane"]
df = pd.get_dummies(df, columns=nominal_cols)
df.pcv = pd.to_numeric(df.pcv, errors="coerce")
import pandas as pd
import numpy as np
from pathlib import Path
import datetime
def replaceNAsManagers(managers, gameLogs, default=True):
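# Collect every (previous season, manager) pair referenced by the game logs,
# aggregate the yearly manager statistics, expand them onto a full year x player
# grid, forward-fill each manager's missing seasons per player and, if
# default=True, replace the remaining NAs with 0 before merging back onto the
# game-log pairs. replaceNAsFielding and replaceNAsBatting below follow the same
# pattern for fielding and batting statistics.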
onlyMans = None
for playerColumn in gameLogs.columns:
if playerColumn.find("manager")>-1:
players = gameLogs[['Date',playerColumn]]
players['yearID'] = pd.DatetimeIndex(pd.to_datetime(players['Date'])).year-1
players = players.rename(columns={playerColumn:"playerID"})
onlyMans = pd.concat([onlyMans, players]).drop(columns='Date').drop_duplicates().dropna().reset_index(drop=True)
managers = managers.groupby(['yearID','playerID'], as_index=False)['Games','Wins','Losses'].sum()
players = managers['playerID'].unique()
years = managers['yearID'].unique()
players = np.array(list(dict.fromkeys(players.tolist()+onlyMans['playerID'].unique().tolist())))
years = np.array(list(dict.fromkeys(years.tolist()+onlyMans['yearID'].unique().tolist())))
fullMans = pd.DataFrame(np.array(np.meshgrid(years, players)).T.reshape(-1,2), columns=['yearID','playerID'])
fullMans['yearID'] = pd.to_numeric(fullMans['yearID'])
fullMans = pd.merge(fullMans, managers, on=['yearID','playerID'], how="left")
fullMans = pd.merge(fullMans[['yearID','playerID']], fullMans.groupby(['playerID']).ffill().drop(columns=['yearID']), left_index=True, right_index=True)
if default:
fullMans = fullMans.fillna(0)
fullMans = pd.merge(onlyMans, fullMans, on=['yearID','playerID'], how="left")
return fullMans
def replaceNAsFielding(fieldings, gameLogs, default=True):
onlyField = None
for playerColumn in gameLogs.columns:
if playerColumn.find("player")>-1:
players = gameLogs[['Date',playerColumn]]
players['yearID'] = pd.DatetimeIndex(pd.to_datetime(players['Date'])).year-1
players = players.rename(columns={playerColumn:"playerID"})
onlyField = pd.concat([onlyField, players]).drop(columns='Date').drop_duplicates().dropna().reset_index(drop=True)
fieldings = fieldings.groupby(['yearID','playerID'], as_index=False).sum()
players = fieldings['playerID'].unique()
years = fieldings['yearID'].unique()
players = np.array(list(dict.fromkeys(players.tolist()+onlyField['playerID'].unique().tolist())))
years = np.array(list(dict.fromkeys(years.tolist()+onlyField['yearID'].unique().tolist())))
fullField = pd.DataFrame(np.array(np.meshgrid(years, players)).T.reshape(-1,2), columns=['yearID','playerID'])
fullField['yearID'] = pd.to_numeric(fullField['yearID'])
fullField = pd.merge(fullField, fieldings, on=['yearID','playerID'], how="left")
fullField = pd.merge(fullField[['yearID','playerID']], fullField.groupby(['playerID']).ffill().drop(columns=['yearID']), left_index=True, right_index=True)
if default:
fullField = fullField.fillna(0)
fullField = pd.merge(onlyField, fullField, on=['yearID','playerID'], how="left")
return fullField
def replaceNAsBatting(battings, gameLogs, default=True):
onlyBatts = None
for playerColumn in gameLogs.columns:
if playerColumn.find("player")>-1:
players = gameLogs[['Date',playerColumn]]
players['yearID'] = pd.DatetimeIndex(pd.to_datetime(players['Date'])).year-1
import numpy as np
import pandas as pd
from datetime import datetime
import pytest
import empyrical
import vectorbt as vbt
from vectorbt import settings
from tests.utils import isclose
day_dt = np.timedelta64(86400000000000)
ts = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [5, 4, 3, 2, 1],
'c': [1, 2, 3, 2, 1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1),
datetime(2018, 1, 2),
datetime(2018, 1, 3),
datetime(2018, 1, 4),
datetime(2018, 1, 5)
]))
ret = ts.pct_change()
settings.returns['year_freq'] = '252 days' # same as empyrical
seed = 42
np.random.seed(seed)
benchmark_rets = pd.DataFrame({
'a': ret['a'] * np.random.uniform(0.8, 1.2, ret.shape[0]),
'b': ret['b'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 2,
'c': ret['c'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 3
})
# ############# accessors.py ############# #
class TestAccessors:
def test_freq(self):
assert ret.vbt.returns.wrapper.freq == day_dt
assert ret['a'].vbt.returns.wrapper.freq == day_dt
assert ret.vbt.returns(freq='2D').wrapper.freq == day_dt * 2
assert ret['a'].vbt.returns(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([1, 2, 3]).vbt.returns.wrapper.freq is None
assert pd.Series([1, 2, 3]).vbt.returns(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([1, 2, 3]).vbt.returns(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
def test_ann_factor(self):
assert ret['a'].vbt.returns(year_freq='365 days').ann_factor == 365
assert ret.vbt.returns(year_freq='365 days').ann_factor == 365
with pytest.raises(Exception) as e_info:
assert pd.Series([1, 2, 3]).vbt.returns(freq=None).ann_factor
def test_from_price(self):
pd.testing.assert_series_equal(pd.Series.vbt.returns.from_price(ts['a']).obj, ts['a'].pct_change())
pd.testing.assert_frame_equal(pd.DataFrame.vbt.returns.from_price(ts).obj, ts.pct_change())
assert pd.Series.vbt.returns.from_price(ts['a'], year_freq='365 days').year_freq == pd.to_timedelta('365 days')
assert pd.DataFrame.vbt.returns.from_price(ts, year_freq='365 days').year_freq == pd.to_timedelta('365 days')
def test_daily(self):
ret_12h = pd.DataFrame({
'a': [0.1, 0.1, 0.1, 0.1, 0.1],
'b': [-0.1, -0.1, -0.1, -0.1, -0.1],
'c': [0.1, -0.1, 0.1, -0.1, 0.1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1, 0),
datetime(2018, 1, 1, 12),
datetime(2018, 1, 2, 0),
datetime(2018, 1, 2, 12),
datetime(2018, 1, 3, 0)
]))
pd.testing.assert_series_equal(
ret_12h['a'].vbt.returns.daily(),
pd.Series(
np.array([0.21, 0.21, 0.1]),
index=pd.DatetimeIndex([
'2018-01-01',
'2018-01-02',
'2018-01-03'
], dtype='datetime64[ns]', freq='D'),
name=ret_12h['a'].name
)
)
pd.testing.assert_frame_equal(
ret_12h.vbt.returns.daily(),
pd.DataFrame(
np.array([
[0.21, -0.19, -0.01],
[0.21, -0.19, -0.01],
[0.1, -0.1, 0.1]
]),
index=pd.DatetimeIndex([
'2018-01-01',
'2018-01-02',
'2018-01-03'
], dtype='datetime64[ns]', freq='D'),
columns=ret_12h.columns
)
)
def test_annual(self):
pd.testing.assert_series_equal(
ret['a'].vbt.returns.annual(),
pd.Series(
np.array([4.]),
index=pd.DatetimeIndex(['2018-01-01'], dtype='datetime64[ns]', freq='252D'),
name=ret['a'].name
)
)
pd.testing.assert_frame_equal(
ret.vbt.returns.annual(),
pd.DataFrame(
np.array([[4., -0.8, 0.]]),
index=pd.DatetimeIndex(['2018-01-01'], dtype='datetime64[ns]', freq='252D'),
columns=ret.columns
)
)
def test_cumulative(self):
res_a = empyrical.cum_returns(ret['a']).rename('a')
res_b = empyrical.cum_returns(ret['b']).rename('b')
res_c = empyrical.cum_returns(ret['c']).rename('c')
pd.testing.assert_series_equal(
ret['a'].vbt.returns.cumulative(),
res_a
)
pd.testing.assert_frame_equal(
ret.vbt.returns.cumulative(),
pd.concat([res_a, res_b, res_c], axis=1)
)
def test_total_return(self):
res_a = empyrical.cum_returns_final(ret['a'])
res_b = empyrical.cum_returns_final(ret['b'])
res_c = empyrical.cum_returns_final(ret['c'])
assert isclose(ret['a'].vbt.returns.total(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.total(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('total_return')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_total(ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_annualized_return(self):
res_a = empyrical.annual_return(ret['a'])
res_b = empyrical.annual_return(ret['b'])
res_c = empyrical.annual_return(ret['c'])
assert isclose(ret['a'].vbt.returns.annualized(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.annualized(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('annualized_return')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_annualized(ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_alpha",
[1., 2., 3.],
)
def test_annualized_volatility(self, test_alpha):
res_a = empyrical.annual_volatility(ret['a'], alpha=test_alpha)
res_b = empyrical.annual_volatility(ret['b'], alpha=test_alpha)
res_c = empyrical.annual_volatility(ret['c'], alpha=test_alpha)
assert isclose(ret['a'].vbt.returns.annualized_volatility(levy_alpha=test_alpha), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.annualized_volatility(levy_alpha=test_alpha),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('annualized_volatility')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_annualized_volatility(ret.shape[0], minp=1, levy_alpha=test_alpha).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_calmar_ratio(self):
res_a = empyrical.calmar_ratio(ret['a'])
res_b = empyrical.calmar_ratio(ret['b'])
res_c = empyrical.calmar_ratio(ret['c'])
assert isclose(ret['a'].vbt.returns.calmar_ratio(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.calmar_ratio(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('calmar_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_calmar_ratio(ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_risk_free,test_required_return",
[(0.01, 0.1), (0.02, 0.2), (0.03, 0.3)],
)
def test_omega_ratio(self, test_risk_free, test_required_return):
res_a = empyrical.omega_ratio(ret['a'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_a):
res_a = np.inf
res_b = empyrical.omega_ratio(ret['b'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_b):
res_b = np.inf
res_c = empyrical.omega_ratio(ret['c'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_c):
res_c = np.inf
assert isclose(ret['a'].vbt.returns.omega_ratio(
risk_free=test_risk_free, required_return=test_required_return), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.omega_ratio(risk_free=test_risk_free, required_return=test_required_return),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('omega_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_omega_ratio(
ret.shape[0], minp=1, risk_free=test_risk_free, required_return=test_required_return).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_risk_free",
[0.01, 0.02, 0.03],
)
def test_sharpe_ratio(self, test_risk_free):
res_a = empyrical.sharpe_ratio(ret['a'], risk_free=test_risk_free)
res_b = empyrical.sharpe_ratio(ret['b'], risk_free=test_risk_free)
res_c = empyrical.sharpe_ratio(ret['c'], risk_free=test_risk_free)
assert isclose(ret['a'].vbt.returns.sharpe_ratio(risk_free=test_risk_free), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.sharpe_ratio(risk_free=test_risk_free),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_sharpe_ratio(ret.shape[0], minp=1, risk_free=test_risk_free).iloc[-1],
| pd.Series([res_a, res_b, res_c], index=ret.columns) | pandas.Series |
from collections import defaultdict
from multiprocessing import Pool
import os.path
import random
import igraph
from numpy import *
import numpy.random as nprandom
import pandas as pd
from sklearn.metrics import adjusted_rand_score
from sklearn import svm
"""
The names of the datasets used for training.
"""
TRAIN_SETS = ['afrasian', 'bai', 'chinese_1964', 'chinese_2004', 'huon',
'japanese', 'kadai', 'kamasau', 'lolo_burmese', 'mayan', 'miao_yao',
'mixe_zoque', 'mon_khmer', 'ob_ugrian', 'tujia']
"""
The names of the datasets used for testing. Note that central_asian is manually
split in two files because of file size limits.
"""
TEST_SETS = ['abvd', 'central_asian', 'central_asian_2', 'ielex']
"""
The relevant subset of features; for feature selection, simply alter this list.
"""
FEATURES = ['feature1', 'feature4', 'feature6', 'feature7', 'feature8']
"""
Module-level variables, used within the workhorse functions.
"""
training = None
trainingVectors = None
test = None
def infer(vectors_dir, output_dir):
"""
Inits and orchestrates the cognate class inferring algorithm.
"""
global training
global trainingVectors
global test
dDict = {'gloss':unicode,
'l1':unicode, 'w1':unicode, 'cc1':unicode,
'l2':unicode, 'w2':unicode, 'cc2':unicode,
'feature1':double, 'feature2':double, 'feature3':double,
'feature4':double, 'feature5':double,
'lexstat_simAA':double, 'lexstat_simBB':double, 'lexstat_simAB':double,
'feature7':double, 'target':int, 'db':unicode }
# load the training data
training = pd.DataFrame()
for dataset_name in TRAIN_SETS:
file_path = os.path.join(vectors_dir, '{}.csv'.format(dataset_name))
training = training.append(pd.read_csv(file_path, encoding='utf-8', dtype=dDict))
training['feature8'] = 1-((2*training.lexstat_simAB)/(training.lexstat_simAA+training.lexstat_simBB))
nprandom.seed(1234)
random.seed(1234)
trainingVectors = training.ix[nprandom.permutation(training.index)].drop_duplicates(['db','gloss'])
# cross-validation over training data
pool = Pool()
totalCC = pool.map(f,training.db.unique())
pool.close()
pool.terminate()
for db,wl in zip(training.db.unique(),totalCC):
file_path = os.path.join(output_dir, '{}.svmCC.csv'.format(db))
wl['fullCC'] = [':'.join(x) for x in wl[['db','concept','cc']].values]
wl[['db','concept','doculect','counterpart',
'fullCC','inferredCC']].to_csv(file_path, encoding='utf-8', index=False)
# load the test data
test = pd.DataFrame()
for dataset_name in TEST_SETS:
file_path = os.path.join(vectors_dir, '{}.csv'.format(dataset_name))
test = test.append(pd.read_csv(file_path, encoding='utf-8', dtype=dDict))
test['feature8'] = 1-((2*test.lexstat_simAB)/(test.lexstat_simAA+test.lexstat_simBB))
for db in test.db.unique():
file_path = os.path.join(output_dir, '{}.svmCC.csv'.format(db))
wl = testCluster(db)
wl.to_csv(file_path, encoding='utf-8', index=False)
def f(x):
return svmInfomapCluster(x)
def infomap_clustering(threshold, matrix, taxa=False, revert=False):
"""
Compute the Infomap clustering analysis of the data. Taken from LingPy's
implementation of the algorithm.
"""
if not igraph:
raise ValueError("The package igraph is needed to run this analysis.")
if not taxa:
taxa = list(range(1, len(matrix) + 1))
G = igraph.Graph()
vertex_weights = []
for i in range(len(matrix)):
G.add_vertex(i)
vertex_weights += [0]
# variable stores edge weights, if they are not there, the network is
# already separated by the threshold
for i,row in enumerate(matrix):
for j,cell in enumerate(row):
if i < j:
if cell <= threshold:
G.add_edge(i, j)
comps = G.community_infomap(edge_weights=None,
vertex_weights=None)
D = {}
for i,comp in enumerate(comps.subgraphs()):
vertices = [v['name'] for v in comp.vs]
for vertex in vertices:
D[vertex] = i+1
if revert:
return D
clr = defaultdict(list)
for i,t in enumerate(taxa):
clr[D[i]] += [t]
return clr
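# Minimal illustrative sketch (toy numbers): rows/columns are items, values are
# distances, and items closer than the threshold should land in one cluster.
# toy_matrix = [[0.0, 0.1, 0.9],
#               [0.1, 0.0, 0.8],
#               [0.9, 0.8, 0.0]]
# infomap_clustering(0.3, toy_matrix, taxa=['w1', 'w2', 'w3'])
# # e.g. defaultdict(list, {1: ['w1', 'w2'], 2: ['w3']})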
def svmInfomapCluster(vdb,featureSubset=FEATURES,th=.34,C=.82,kernel='linear',gamma=1E-3):
"""
The first argument is the validation data base, the rest of the training
databases are used for training.
"""
newWordList = pd.DataFrame()
fitting = trainingVectors[trainingVectors.db!=vdb]
validation = training[training.db==vdb].copy()
X = fitting[featureSubset].values
y = fitting.target.values
svClf = svm.SVC(kernel=kernel,C=C,gamma=gamma,
probability=True)
svClf.fit(X,y)
nprandom.seed(1234)
random.seed(1234)
svScores = svClf.predict_proba(validation[featureSubset].values)[:,1]
validation['svScores'] = svScores
scores = pd.DataFrame()
wordlist = pd.DataFrame()
concepts = validation.gloss.unique()
taxa = unique(validation[['l1','l2']].values.flatten())
dataWordlist = vstack([validation[['gloss','l1','w1','cc1']].values,
validation[['gloss','l2','w2','cc2']].values])
dataWordlist = pd.DataFrame(dataWordlist,columns=['concept','doculect',
'counterpart','cc'])
dataWordlist = dataWordlist.drop_duplicates()
dataWordlist.index = ['_'.join(map(unicode,x))
for x in
dataWordlist[['concept','doculect','counterpart']].values]
validation['id_1'] = [c+'_'+l+'_'+unicode(w)
for (c,l,w) in validation[['gloss','l1','w1']].values]
validation['id_2'] = [c+'_'+l+'_'+unicode(w)
for (c,l,w) in validation[['gloss','l2','w2']].values]
for c in concepts:
dataC= validation[validation.gloss==c].copy()
dataC['id_1'] = [x.replace(' ','').replace(',','') for x in dataC.id_1]
dataC['id_2'] = [x.replace(' ','').replace(',','') for x in dataC.id_2]
wlC = dataWordlist[dataWordlist.concept==c].copy()
if len(wlC)>1:
wlC.index = [x.replace(' ','').replace(',','') for x in wlC.index]
svMtx = zeros((len(wlC.index),len(wlC.index)))
svMtx[pd.match(dataC.id_1,wlC.index),
pd.match(dataC.id_2,wlC.index)] = dataC.svScores.values
svMtx[pd.match(dataC.id_2,wlC.index),
pd.match(dataC.id_1,wlC.index)] = dataC.svScores.values
svDistMtx = log(1-svMtx)
tth = log(th)-svDistMtx.min()
svDistMtx -= svDistMtx.min()
fill_diagonal(svDistMtx,0)
pDict = infomap_clustering(tth,svDistMtx)
pArray = vstack([c_[pDict[k],[k]*len(pDict[k])] for k in pDict.keys()])
partitionIM = pArray[argsort(pArray[:,0]),1]
else:
partitionIM = array([1])
wlC['inferredCC'] = [vdb+':'+c+':'+str(x) for x in partitionIM]
wlC['db'] = vdb
newWordList = pd.concat([newWordList,wlC])
newWordList.index = arange(len(newWordList))
return newWordList
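# Hedged example: a leave-one-database-out run for a single training set, assuming
# the module-level `training` / `trainingVectors` frames were already populated
# (normally done inside infer()):
# wordlist = svmInfomapCluster('mayan', th=.34, C=.82, kernel='linear')
# wordlist[['db', 'concept', 'doculect', 'counterpart', 'inferredCC']].head()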
def testCluster(vdb,featureSubset=FEATURES,C=0.82,gamma=9e-04,kernel='linear',th=.34):
"""
Inference on test data.
"""
newWordList = pd.DataFrame()
fitting = trainingVectors
validation = test[test.db==vdb].copy()
X = fitting[featureSubset].values
y = fitting.target.values
svClf = svm.SVC(kernel=kernel,C=C,gamma=gamma,
probability=True)
svClf.fit(X,y)
svScores = svClf.predict_proba(validation[featureSubset].values)[:,1]
validation['svScores'] = svScores
scores = pd.DataFrame()
wordlist = pd.DataFrame()
concepts = validation.gloss.unique()
taxa = unique(validation[['l1','l2']].values.flatten())
dataWordlist = vstack([validation[['gloss','l1','w1','cc1']].values,
validation[['gloss','l2','w2','cc2']].values])
dataWordlist = pd.DataFrame(dataWordlist,columns=['concept','doculect',
'counterpart','cc'])
dataWordlist = dataWordlist.drop_duplicates()
dataWordlist.index = ['_'.join(map(unicode,x))
for x in
dataWordlist[['concept','doculect','counterpart']].values]
validation['id_1'] = [c+'_'+l+'_'+unicode(w)
for (c,l,w) in validation[['gloss','l1','w1']].values]
validation['id_2'] = [c+'_'+l+'_'+unicode(w)
for (c,l,w) in validation[['gloss','l2','w2']].values]
for c in concepts:
dataC= validation[validation.gloss==c].copy()
dataC['id_1'] = [x.replace(' ','').replace(',','') for x in dataC.id_1]
dataC['id_2'] = [x.replace(' ','').replace(',','') for x in dataC.id_2]
wlC = dataWordlist[dataWordlist.concept==c].copy()
if len(wlC)>1:
wlC.index = [x.replace(' ','').replace(',','') for x in wlC.index]
svMtx = zeros((len(wlC.index),len(wlC.index)))
svMtx[pd.match(dataC.id_1,wlC.index),
| pd.match(dataC.id_2,wlC.index) | pandas.match |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 14 00:09:45 2018
@author: savitha
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
from sklearn.model_selection import train_test_split
from keras.layers import Input, Dense
from sklearn.preprocessing import MinMaxScaler
from keras import regularizers
from keras.models import Model#,load_model
#from keras.callbacks import ModelCheckpoint, TensorBoard
####### Data Setup
sns.set(style='whitegrid', palette='muted', font_scale=1.5)
rcParams['figure.figsize'] = 14, 8
RANDOM_SEED = 0
LABELS = ["Normal", "Fraud"]
df = | pd.read_csv("denbigh_data_loader.csv") | pandas.read_csv |
import sys
import os
import numpy as np
import scipy.io
import scipy.sparse
import numba
import random
import multiprocessing as mp
import subprocess
import cytoolz as toolz
import collections
from itertools import chain
import regex as re
import yaml
import logging
import time
import gzip
import pandas as pd
from functools import partial
from typing import NamedTuple
from pysam import AlignmentFile
from .util import compute_edit_distance, read_gene_map_from_gtf
from .fastq_io import read_fastq
from .barcode import ErrorBarcodeHash, ErrorBarcodeHashConstraint
from .estimate_cell_barcode import get_cell_whitelist
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s: %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
def format_fastq(*fastq, config, method, fastq_out, cb_count,
num_thread=4, max_num_cell=1000000):
"""
Merging fastq reads by putting the cell barcodes and UMI sequences
to the headers of the cDNA reads
:param config: the config file
:param method: the library preparation protocol, e.g., can be one of
10X, Drop-seq, InDrop, Seq-Well, CEL-seq2, sci-RNA-seq, SPLiT-seq,
you can add protocol to the configure file easily
by specifying the read structures.
A template configuration file is provided in scumi/config.yaml
:param fastq: input fastq files
:param fastq_out: the output fastq file
:param cb_count: an output file containing the # reads for each cell barcode
:param num_thread: int
the number of cpu cores to use
:param max_num_cell: int
the maximum number of cells
"""
with open(config, 'r') as stream:
config_dict = yaml.safe_load(stream)
config_dict = config_dict[method]
num_read = config_dict['num_read']
num_fastq = len(fastq)
if num_fastq != num_read:
logger.error(f'Error: the number of input fastq files {num_fastq} is different '
f'from the number of fastq files {num_read} detected in the config file')
sys.exit(-1)
read_regex_str, barcode_filter, read_regex_str_qual = \
zip(*[_extract_input_read_template('read' + str(i), config_dict)
for i in range(1, num_read + 1)])
barcode_filter_dict = dict()
for d in barcode_filter:
barcode_filter_dict.update(d)
read_template = _infer_read_template(read_regex_str)
# select
read_regex_list = [re.compile(z) for z in read_regex_str_qual]
format_read = partial(_format_read, read_regex_list=read_regex_list,
read_template=read_template.read_template,
cb_tag=read_template.cb_tag,
ub_len=read_template.ub_len,
barcode_filter_dict=barcode_filter_dict)
chunk_size = 8000
fastq_reader = [read_fastq(fastq_i) for fastq_i in fastq]
chunks = toolz.partition_all(chunk_size, zip(*fastq_reader))
num_cpu = mp.cpu_count()
num_thread = num_thread if num_cpu > num_thread else num_cpu
seq_chunk_obj = toolz.partition_all(num_thread, chunks)
fastq_out_all = [fastq_out + str(x) + '.gz' for x in range(num_thread)]
[gzip.open(x, 'wb').close() for x in fastq_out_all]
cb_count_all = [cb_count + str(x) + '.csv' for x in range(num_thread)]
[open(x, 'wt').close() for x in cb_count_all]
fastq_info = collections.defaultdict(collections.Counter)
iteration = 0
results = []
time_start = time.time()
pool = mp.Pool(num_thread)
for fastq_chunk in seq_chunk_obj:
res = pool.starmap_async(format_read, zip(fastq_chunk, fastq_out_all, cb_count_all))
results.append(res)
if len(results) == num_thread * 10:
results[0].wait()
while results and results[0].ready():
iteration += 1
if not (iteration % 10):
logger.info(f'Processed {iteration * chunk_size * num_thread:,d} reads!')
res = results.pop(0)
chunk_info = res.get()
_update_fastq_info(fastq_info, chunk_info)
pool.close()
pool.join()
for res in results:
chunk_info = res.get()
_update_fastq_info(fastq_info, chunk_info)
with open('.fastq_count.tsv', 'w') as f:
for k, v in fastq_info['read'].most_common():
f.write(f'{k}\t{v}\n')
cmd_cat_fastq = ' '.join(['cat'] + fastq_out_all + ['>'] + [fastq_out])
try:
subprocess.check_output(cmd_cat_fastq, shell=True)
[os.remove(fastq_file) for fastq_file in fastq_out_all]
except subprocess.CalledProcessError:
logger.info(f'Errors in concatenate fastq files')
sys.exit(-1)
except OSError:
logger.info(f'Errors in deleting fastq files')
sys.exit(-1)
time_used = time.time() - time_start
logger.info(f'Formatting fastq done, taking {time_used/3600.0:.3f} hours')
if not cb_count:
cb_count = fastq_out + '.cb_count'
df = _count_cell_barcode_umi(cb_count_all[0])
for cb_file in cb_count_all[1:]:
df1 = _count_cell_barcode_umi(cb_file)
df = pd.concat([df, df1], axis=0)
df = df.groupby(df.index).sum()
if df.shape[0] > max_num_cell * 2:
df = df.sort_values(by=df.columns[0], ascending=False)
df = df.iloc[:max_num_cell, :]
try:
[os.remove(cb_file) for cb_file in cb_count_all]
except OSError:
logger.info(f'Errors in deleting cell barcode files')
sys.exit(-1)
df = df.sort_values(by=df.columns[0], ascending=False)
if df.shape[0] > 0:
df.columns = [str(x) for x in range(df.shape[1])]
df.index.name = 'cb'
column_name = list(df.columns.values)
column_name[0] = 'cb_count'
df.columns = column_name
df.to_csv(cb_count, sep='\t')
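# Hedged usage sketch (file names are hypothetical): merge a barcode read and a
# cDNA read for a 10X-style library using the protocol definitions shipped in
# scumi/config.yaml, writing tagged cDNA reads plus per-cell-barcode read counts.
# format_fastq('R1.fastq.gz', 'R2.fastq.gz',
#              config='scumi/config.yaml', method='10X',
#              fastq_out='merged.fastq.gz', cb_count='merged.cb_count',
#              num_thread=4)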
def _update_fastq_info(fastq_info, chunk_info):
for fastq_count in chunk_info:
fastq_info['read'].update(read_pass=fastq_count[0],
read_pass_barcode=fastq_count[1],
read_pass_polyt=fastq_count[2],
read_total=fastq_count[3])
def _count_cell_barcode_umi(cb_file, chunk_size=10 ** 7):
cb_reader = pd.read_csv(cb_file, header=None, iterator=True,
sep='\t', index_col=0)
chunks = cb_reader.get_chunk(chunk_size)
chunks = chunks.groupby(chunks.index).sum()
status = True
while status:
try:
chunk = cb_reader.get_chunk(chunk_size)
chunks = pd.concat([chunks, chunk], axis=0)
chunks = chunks.groupby(chunks.index).sum()
except StopIteration:
status = False
logger.info('Read cell barcode counts done.')
return chunks
def _extract_barcode_pos(barcode_dict, config):
barcode_reg = []
pos_all = []
barcode_filter = dict()
for barcode_and_pos in barcode_dict:
barcode, pos = barcode_and_pos
pos_all.append(pos)
barcode_reg.append('(?P<' + barcode + '>.{' +
str(pos[1] - pos[0] + 1) + '})')
try:
value = config[barcode + '_value']
barcode_filter.update({barcode: ErrorBarcodeHash(value, 1)})
except KeyError:
pass
return barcode_reg, pos_all, barcode_filter
def _extract_input_read_template(read, config):
read_name = '(@.*)\\n'
read_plus = '(\\+.*)\\n'
read_qual = '(.*)\\n'
filter_dict = dict()
seq = [(key, value) for key, value in config[read].items()
if key.startswith('cDNA')]
if seq:
read_name = '@(?P<name>.*)\\n'
read_seq = '(?P<seq>.*)\\n'
read_qual = '(?P<qual>.*)\\n'
read_template = read_name + read_seq + read_plus + read_qual
return read_template, filter_dict, read_template
cell_barcode = [(key, value) for key, value in config[read].items()
if key.startswith('CB') and not key.endswith('value')]
umi = [(key, value) for key, value in config[read].items()
if key.startswith('UMI')]
poly_t = [(key, value) for key, value in config[read].items()
if key.startswith('polyT')]
cb_reg, cb_pos, cb_filter = _extract_barcode_pos(cell_barcode, config[read])
filter_dict.update(cb_filter)
umi_reg, umi_pos, _ = _extract_barcode_pos(umi, config[read])
umi_reg = [z.replace('UMI', 'UB') for z in umi_reg]
pt_reg, pt_pos, _ = _extract_barcode_pos(poly_t, config[read])
read_pos_start = [z[0] for z in cb_pos]
read_pos_start += [z[0] for z in umi_pos]
read_pos_start += [z[0] for z in pt_pos]
read_pos_end = [z[1] for z in cb_pos]
read_pos_end += [z[1] for z in umi_pos]
read_pos_end += [z[1] for z in pt_pos]
idx = sorted(range(len(read_pos_start)),
key=lambda k: read_pos_start[k])
barcode_tag = cb_reg + umi_reg + pt_reg
read_pos_start = [read_pos_start[i] for i in idx]
read_pos_end = [read_pos_end[i] for i in idx]
barcode_tag = [barcode_tag[i] for i in idx]
idx_skip = [read_pos_start[i+1] - read_pos_end[i] - 1
for i in range(0, len(read_pos_start)-1)]
barcode_skip = ['[ACGTN]{' + str(i) + '}' for i in idx_skip]
read_seq = barcode_tag[0]
for i in range(len(read_pos_start)-1):
if idx_skip[i] == 0:
read_seq += barcode_tag[i+1]
else:
read_seq += barcode_skip[i]
read_seq += barcode_tag[i+1]
filter_dict.update(_filter_ploy_t(read_seq))
if read_pos_start[0] > 1:
read_seq = '[ACGTN]{' + str(read_pos_start[0]-1) + '}'
read_seq += '[ACGTN]*'
read_seq = read_seq + '\\n'
read_template = read_name + read_seq + read_plus + read_qual
read_qual = re.sub('>', r'_qual>', read_seq)
read_qual = re.sub('\[ACGTN\]', '.', read_qual)
read_template_qual = read_name + read_seq + read_plus + read_qual
return read_template, filter_dict, read_template_qual
def _filter_ploy_t(read_seq):
match = re.findall('\?P<polyT>\.{[0-9]+}', read_seq)
poly_t_count = [int(re.findall(r'\d+', z)[0]) for z in match]
poly_t_filter = {'polyT': ErrorBarcodeHash('T' * z, 1) for z in poly_t_count}
return poly_t_filter
def _replace_poly_t(read_seq):
match = re.findall('\?P<polyT>\.{[0-9]+}', read_seq)
poly_t_count = [int(re.findall(r'\d+', z)[0]) for z in match]
poly_t = ['(' + 'T'*z + ')' + '{s<=1}' for z in poly_t_count]
for z in range(len(match)):
read_seq = read_seq.replace(match[z], poly_t[z])
return read_seq
def _infer_read_template(reg_list):
class ReadInfo(NamedTuple):
cb: bool
cb_tag: list
cb_len: list
ub: bool
ub_tag: list
ub_len: list
read_template: str
cb = ub = False
cb_tag = ub_tag = []
cb_len = ub_len = []
read_template = '@'
reg = ''.join(k for k in reg_list)
if 'CB' in reg:
logger.info('Cell barcode in configure file')
cb = True
cb_seq_template = _accumulate_barcode('CB', reg)
cb_template = ':CB_' + cb_seq_template[1]
read_template += cb_template
cb_tag = cb_seq_template[0]
cb_len = cb_seq_template[2]
if 'UB' in reg:
logger.info('UMI in config file')
ub = True
ub_seq_template = _accumulate_barcode('UB', reg)
ub_template = ':UB_' + ub_seq_template[1]
read_template += ub_template
ub_tag = ub_seq_template[0]
ub_len = ub_seq_template[2]
read_template += ':{name}'
read_template += '\n{seq}\n+\n{qual}\n'
return ReadInfo(cb=cb, cb_tag=cb_tag, cb_len=cb_len,
ub=ub, ub_tag=ub_tag, ub_len=ub_len,
read_template=read_template)
def _accumulate_barcode(barcode, seq):
barcode_num = [sub_str[0] for sub_str in
seq.split('?P<' + re.escape(barcode))][1:]
status = '>' in barcode_num
barcode_num = ['0' if x == '>' else x for x in barcode_num]
barcode_num = sorted(barcode_num, key=int)
if status:
barcode_num[0] = ''
barcode_seq = [barcode + num for num in barcode_num]
barcode_template = ['{' + tag + '}' for tag in barcode_seq]
barcode_template = '-'.join(barcode_template)
str_split = 'P<' + barcode + '[0-9]*>.{'
barcode_len = [sub_str for sub_str in re.split(str_split, seq)][1:]
barcode_len = [int(re.findall(r'(\d+)', barcode_i)[0])
for barcode_i in barcode_len]
return barcode_seq, barcode_template, barcode_len
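# Worked example (illustrative input, traced from the logic above): for a read
# structure with two 8-bp cell-barcode segments separated by a 4-bp spacer,
# _accumulate_barcode('CB', '(?P<CB1>.{8})[ACGTN]{4}(?P<CB2>.{8})')
# should return roughly (['CB1', 'CB2'], '{CB1}-{CB2}', [8, 8]).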
def _format_read(chunk, fastq_file, cb_count_file, read_regex_list,
read_template, cb_tag, ub_len, barcode_filter_dict):
reads = []
num_read = len(chunk)
num_read_pass = num_read_barcode = num_read_polyt = 0
num_regex = len(read_regex_list)
barcode_counter = collections.defaultdict(
partial(np.zeros, shape=(ub_len[0] + 1), dtype=np.uint32))
ignore_read = False
for read_i in chunk:
read_dict_list = []
for i, regex_i in enumerate(read_regex_list):
read_match = regex_i.match(read_i[i])
if not read_match:
ignore_read = True
break
read_dict_list.append(read_match.groupdict())
if ignore_read:
ignore_read = False
continue
read1_dict = read_dict_list[0]
if num_regex > 1:
for regex_id in range(1, num_regex):
read1_dict.update(read_dict_list[regex_id])
cb = [barcode_filter_dict[tag][read1_dict[tag]]
if tag in barcode_filter_dict.keys() else read1_dict[tag]
for tag in cb_tag]
if all(cb):
cb = '-'.join(cb)
num_read_barcode += 1
else:
ignore_read = True
ub = read1_dict['UB']
try:
poly_t = read1_dict['polyT']
if not barcode_filter_dict['polyT'][poly_t]:
ignore_read = True
else:
num_read_polyt += 1
except KeyError:
pass
if ignore_read:
ignore_read = False
continue
num_read_pass += 1
if len(read1_dict['seq']) >= 1:
read1_dict = read_template.format_map(read1_dict)
reads.append(read1_dict)
barcode_counter[cb] += [x == 'T' for x in 'T' + ub]
with gzip.open(fastq_file, 'ab') as fastq_hd:
for read in reads:
fastq_hd.write(bytes(read, 'utf8'))
df = pd.DataFrame.from_dict(barcode_counter, orient='index')
if df.shape[0] > 0:
df = df.sort_values(by=df.columns[0], ascending=False)
df.index.name = 'cb'
column_name = list(df.columns.values)
column_name[0] = 'cb_count'
df.columns = column_name
df.to_csv(cb_count_file, sep='\t', mode='a', header=False)
return num_read_pass, num_read_barcode, num_read_polyt, num_read
def _construct_barcode_regex(bam):
read_mode = 'r' if bam.endswith('.sam') else 'rb'
bam_file = AlignmentFile(bam, mode=read_mode)
first_alignment = next(bam_file)
bam_file.close()
barcodes = set()
for barcode in ['CB_', 'UB_']:
if barcode in first_alignment.qname:
barcodes.add(barcode)
barcode_parser = '.*'
if 'CB_' in barcodes:
barcode_parser += ':CB_(?P<CB>[A-Z\-]+)'
if 'UB_' in barcodes:
barcode_parser += ':UB_(?P<UB>[A-Z\-]+)'
if barcode_parser == '.*':
logger.error('Error: no cell barcodes and UMIs.')
sys.exit(-1)
barcode_parser += ':*'
barcode_parser = re.compile(barcode_parser)
match = barcode_parser.match(first_alignment.qname)
cb = _extract_tag(match, 'CB')
return barcode_parser, cb, read_mode
def _extract_tag(match, tag):
try:
tag = match.group(tag)
except IndexError:
tag = None
return tag
def count_feature(*cb, bam, molecular_info_h5, gtf, cb_count, feature_tag='XT:Z',
expect_cell=False, force_cell=False, all_cell=False,
depth_threshold=1, cell_barcode_whitelist=None):
"""
Count the number of reads/UMIs mapped to each gene
:param bam: the input sam/bam file
:param molecular_info_h5: output the molecular info
:param cb: the input cell barcode files, can be empty or None
:param cell_barcode_whitelist: a file contain the selected cell barcodes
:param gtf: a GTF file
:param cb_count: a file containing the number of reads mapped to each cell barcode,
output from format_fastq
:param feature_tag: the tag representing genes in the input bam file
:param depth_threshold: only considering UMIs that have at least
depth_threshold reads support
:param expect_cell: the expected number of cells in the bam file
:param force_cell: force to return the number of cells set by expect_cell
:param all_cell: keep all cell barcodes - can be very slow
"""
barcode_parser, first_cb, read_mode = _construct_barcode_regex(bam)
num_cb = len(first_cb.split('-'))
num_cb_file = len(cb)
if 0 == num_cb_file:
cb = [None] * num_cb
elif num_cb != num_cb_file:
logger.error(f'Error: the number of input cell barcodes files {num_cb_file} '
f'is different from the number of cell barcodes {num_cb} '
f'detected in the bam file')
if num_cb > num_cb_file:
cb = cb + [None] * (num_cb - num_cb_file)
else:
cb = cb[:num_cb]
# TODO: no cell barcodes detected
correct_cb_fun, cb_list, cb_remove = _construct_cb_filter(
cb_count, cb, expect_cell, force_cell, all_cell, cell_barcode_whitelist)
gene_map_dict = read_gene_map_from_gtf(gtf)
logger.info('Counting molecular info')
time_start_count = time.time()
sam_file = AlignmentFile(bam, mode=read_mode)
_count_feature_partial = partial(_count_feature,
gene_map_dict=gene_map_dict,
barcode_parser=barcode_parser,
correct_cb_fun=correct_cb_fun,
sam_file=sam_file,
feature_tag=feature_tag)
track = sam_file.fetch(until_eof=True)
map_info, read_in_cell, molecular_info = _count_feature_partial(track)
time_count = time.time() - time_start_count
logger.info(f'Counting molecular info done - {time_count/3600.0:.3f} hours, '
f'{int(3600.0 * map_info["num_alignment"]/time_count):,d} '
f'alignments/hour\n')
# TODO: still output results
if len(molecular_info) == 0:
logger.error('Error: no reads mapped to features.')
sys.exit(-1)
name = ['cell',
'gene',
'umi',
'depth',
]
logger.info('Converting to a dataframe')
convert_time = time.time()
molecular_info = pd.Series(molecular_info).reset_index()
molecular_info.columns = name
for col in name[:3]:
molecular_info.loc[:, col] = molecular_info[col].astype('category')
convert_time = time.time() - convert_time
logger.info(f'Converting to a dataframe done, '
f'taking {convert_time/60.0:.3f} minutes\n')
molecular_info.columns = name
if num_cb > 1 and cb_list:
molecular_info = molecular_info.loc[molecular_info['cell'].isin(cb_list), :]
if cb_remove:
molecular_info = molecular_info.loc[~molecular_info['cell'].isin(cb_remove), :]
molecular_info = molecular_info.loc[molecular_info['depth'] >= 0.95, :]
molecular_info['depth'] = \
np.floor(molecular_info['depth'].values + 0.5).astype('uint32')
molecular_info = molecular_info.sort_values(name[:3])
molecular_info = molecular_info.reset_index(drop=True)
map_info = pd.Series(map_info)
read_in_cell = | pd.DataFrame.from_dict(read_in_cell, orient='index') | pandas.DataFrame.from_dict |
"""
intent_reports.py
"""
import pandas as pd
from tabulate import tabulate
from ipfabric import IPFClient
# Requires openpyxl also for Excel reports
if __name__ == "__main__":
ipf = IPFClient()
# ipf = IPFClient('https://demo3.ipfabric.io/', token='<PASSWORD>', verify=False, timeout=15)
ipf.intent.load_intent() # Load Intent Checks
# ipf.intent.load_intent('$prev') Load a different snapshot into the class overriding the client.
compare = ipf.intent.compare_snapshot("$lastLocked", reverse=True)
print(tabulate(compare, headers="keys"))
"""
Current: The snapshot loaded into the intent class:
ipf.intent.load_intent('$last')
Other: The snapshot in the comparison:
ipf.intent.compare_snapshot('$prev', reverse=True)
Reverse (Default: False): Will flip current and other. Use when class is newest date and compare is an older date.
name id check loaded_snapshot compare_snapshot diff
-------------------------------------------- ---------- ------- ----------------- ------------------ ------
CDP/LLDP unidirectional 320633253 total 25 18 -7
CDP/LLDP unidirectional 320633253 blue 25 18 -7
BGP Session Age 322316677 total 367 358 -9
BGP Session Age 322316677 green 309 305 -4
BGP Session Age 322316677 blue 22 19 -3
BGP Session Age 322316677 amber 3 0 -3
BGP Session Age 322316677 red 33 33 0
"""
intents, intents_with_groups = list(), list()
for intent in ipf.intent.intent_checks:
row = [intent.name, intent.result.checks.green, intent.result.checks.blue,
intent.result.checks.amber, intent.result.checks.red]
intents.append(row)
if not intent.groups:
intents_with_groups.append([None, *row])
for group in intent.groups:
intents_with_groups.append([group.name, *row])
columns = ['Intent Name', 'Green', 'Blue', 'Amber', 'Red']
intent_df = | pd.DataFrame(intents, columns=columns) | pandas.DataFrame |
"""Functions to access data from Copernicus Atmosphere Monitoring Service
(CAMS) radiation service.
.. codeauthor:: <NAME><<EMAIL>>
"""
import pandas as pd
import requests
import io
import warnings
CAMS_INTEGRATED_COLUMNS = [
'TOA', 'Clear sky GHI', 'Clear sky BHI', 'Clear sky DHI', 'Clear sky BNI',
'GHI', 'BHI', 'DHI', 'BNI',
'GHI no corr', 'BHI no corr', 'DHI no corr', 'BNI no corr']
# Dictionary mapping CAMS Radiation and McClear variables to pvlib names
CAMS_VARIABLE_MAP = {
'TOA': 'ghi_extra',
'Clear sky GHI': 'ghi_clear',
'Clear sky BHI': 'bhi_clear',
'Clear sky DHI': 'dhi_clear',
'Clear sky BNI': 'dni_clear',
'GHI': 'ghi',
'BHI': 'bhi',
'DHI': 'dhi',
'BNI': 'dni',
'sza': 'solar_zenith',
}
# Dictionary mapping time steps to CAMS time step format
TIME_STEPS_MAP = {'1min': 'PT01M', '15min': 'PT15M', '1h': 'PT01H',
'1d': 'P01D', '1M': 'P01M'}
TIME_STEPS_IN_HOURS = {'1min': 1/60, '15min': 15/60, '1h': 1, '1d': 24}
SUMMATION_PERIOD_TO_TIME_STEP = {'0 year 0 month 0 day 0 h 1 min 0 s': '1min',
'0 year 0 month 0 day 0 h 15 min 0 s': '15min', # noqa
'0 year 0 month 0 day 1 h 0 min 0 s': '1h',
'0 year 0 month 1 day 0 h 0 min 0 s': '1d',
'0 year 1 month 0 day 0 h 0 min 0 s': '1M'}
def get_cams(start, end, latitude, longitude, email, identifier='mcclear',
altitude=None, time_step='1h', time_ref='UT', verbose=False,
integrated=False, label=None, map_variables=True,
server='www.soda-is.com', timeout=30):
"""
Retrieve time-series of radiation and/or clear-sky global, beam, and
diffuse radiation from CAMS. Data from CAMS Radiation [1]_ and CAMS McClear
[2]_ are retrieved from SoDa [3]_.
Time coverage: 2004-01-01 to two days ago
Access: free, but requires registration, see [1]_
Requests: max. 100 per day
Geographical coverage: Wordwide for CAMS McClear and -66° to 66° in both
latitude and longitude for CAMS Radiation
Parameters
----------
start: datetime like
First day of the requested period
end: datetime like
Last day of the requested period
latitude: float
in decimal degrees, between -90 and 90, north is positive (ISO 19115)
longitude : float
in decimal degrees, between -180 and 180, east is positive (ISO 19115)
email: str
Email address linked to a SoDa account
identifier: {'mcclear', 'cams_radiation'}
Specify whether to retrieve CAMS Radiation or McClear parameters
altitude: float, default: None
Altitude in meters. If None, then the altitude is determined from the
NASA SRTM database
time_step: str, {'1min', '15min', '1h', '1d', '1M'}, default: '1h'
Time step of the time series, either 1 minute, 15 minute, hourly,
daily, or monthly.
time_ref: str, {'UT', 'TST'}, default: 'UT'
'UT' (universal time) or 'TST' (True Solar Time)
verbose: boolean, default: False
Verbose mode outputs additional parameters (aerosols). Only available
for 1 minute and universal time. See [1]_ for parameter description.
integrated: boolean, default False
Whether to return radiation parameters as integrated values (Wh/m^2)
or as average irradiance values (W/m^2) (pvlib preferred units)
label: {'right', 'left'}, default: None
Which bin edge label to label time-step with. The default is 'left' for
all time steps except for '1M' which has a default of 'right'.
map_variables: bool, default: True
When true, renames columns of the DataFrame to pvlib variable names
where applicable. See variable CAMS_VARIABLE_MAP.
server: str, default: 'www.soda-is.com'
Main server (www.soda-is.com) or backup mirror server (pro.soda-is.com)
timeout : int, default 30
Time in seconds to wait for server response before timeout
Returns
-------
data: pandas.DataFrame
Timeseries data, see Notes for columns
metadata: dict
Metadata of the requested time-series
Notes
-----
In order to use the CAMS services, users must register for a free SoDa
account using an email address [1]_.
The returned data DataFrame includes the following fields:
======================== ====== =========================================
Key, mapped key Format Description
======================== ====== =========================================
**Mapped field names are returned when the map_variables argument is True**
---------------------------------------------------------------------------
Observation period str Beginning/end of time period
TOA, ghi_extra float Horizontal radiation at top of atmosphere
Clear sky GHI, ghi_clear float Clear sky global radiation on horizontal
Clear sky BHI, bhi_clear float Clear sky beam radiation on horizontal
Clear sky DHI, dhi_clear float Clear sky diffuse radiation on horizontal
Clear sky BNI, dni_clear float Clear sky beam radiation normal to sun
GHI, ghi† float Global horizontal radiation
BHI, bhi† float Beam (direct) radiation on horizontal
DHI, dhi† float Diffuse horizontal radiation
BNI, dni† float Beam (direct) radiation normal to the sun
Reliability† float Reliable data fraction in summarization
======================== ====== =========================================
†Parameters only returned if identifier='cams_radiation'. For description
of additional output parameters in verbose mode, see [1]_ and [2]_.
Note that it is recommended to specify the latitude and longitude to at
least the fourth decimal place.
Variables corresponding to standard pvlib variables are renamed,
e.g. `sza` becomes `solar_zenith`. See the
`pvlib.iotools.cams.CAMS_VARIABLE_MAP` dict for the complete
mapping.
See Also
--------
pvlib.iotools.read_cams, pvlib.iotools.parse_cams
Raises
------
requests.HTTPError
If the request is invalid, then an XML file is returned by the CAMS
service and the error message will be raised as an exception.
References
----------
.. [1] `CAMS Radiation Service Info
<http://www.soda-pro.com/web-services/radiation/cams-radiation-service/info>`_
.. [2] `CAMS McClear Service Info
<http://www.soda-pro.com/web-services/radiation/cams-mcclear/info>`_
.. [3] `CAMS McClear Automatic Access
<http://www.soda-pro.com/help/cams-services/cams-mcclear-service/automatic-access>`_
"""
try:
time_step_str = TIME_STEPS_MAP[time_step]
except KeyError:
raise ValueError(f'Time step not recognized. Must be one of '
f'{list(TIME_STEPS_MAP.keys())}')
if (verbose) and ((time_step != '1min') or (time_ref != 'UT')):
verbose = False
warnings.warn("Verbose mode only supports 1 min. UT time series!")
if identifier not in ['mcclear', 'cams_radiation']:
raise ValueError('Identifier must be either mcclear or cams_radiation')
# Format verbose variable to the required format: {'true', 'false'}
verbose = str(verbose).lower()
if altitude is None: # Let SoDa get elevation from the NASA SRTM database
altitude = -999
# Start and end date should be in the format: yyyy-mm-dd
start = start.strftime('%Y-%m-%d')
end = end.strftime('%Y-%m-%d')
email = email.replace('@', '%2540') # Format email address
identifier = 'get_{}'.format(identifier.lower()) # Format identifier str
base_url = f"http://{server}/service/wps"
data_inputs_dict = {
'latitude': latitude,
'longitude': longitude,
'altitude': altitude,
'date_begin': start,
'date_end': end,
'time_ref': time_ref,
'summarization': time_step_str,
'username': email,
'verbose': verbose}
# Manual formatting of the input parameters seperating each by a semicolon
data_inputs = ";".join([f"{key}={value}" for key, value in
data_inputs_dict.items()])
params = {'Service': 'WPS',
'Request': 'Execute',
'Identifier': identifier,
'version': '1.0.0',
'RawDataOutput': 'irradiation',
}
# The DataInputs parameter of the URL has to be manually formatted and
# added to the base URL as it contains sub-parameters seperated by
# semi-colons, which gets incorrectly formatted by the requests function
# if passed using the params argument.
res = requests.get(base_url + '?DataInputs=' + data_inputs, params=params,
timeout=timeout)
    # Invalid requests return an XML error message and HTTP status code 200
    # as if the request was successful. Therefore, errors cannot be handled
    # automatically (e.g. res.raise_for_status()) and are instead handled manually
if res.headers['Content-Type'] == 'application/xml':
errors = res.text.split('ows:ExceptionText')[1][1:-2]
raise requests.HTTPError(errors, response=res)
# Successful requests returns a csv data file
elif res.headers['Content-Type'] == 'application/csv':
fbuf = io.StringIO(res.content.decode('utf-8'))
data, metadata = parse_cams(fbuf, integrated=integrated, label=label,
map_variables=map_variables)
return data, metadata
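# Hedged usage sketch (coordinates and email are placeholders; a registered SoDa
# account is required):
# data, meta = get_cams(start=pd.Timestamp('2020-06-01'),
#                       end=pd.Timestamp('2020-06-07'),
#                       latitude=55.7906, longitude=12.5251,
#                       email='user@example.com',
#                       identifier='mcclear', time_step='1h',
#                       map_variables=True)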
def parse_cams(fbuf, integrated=False, label=None, map_variables=True):
"""
Parse a file-like buffer with data in the format of a CAMS Radiation or
McClear file. The CAMS services are described in [1]_ and [2]_.
Parameters
----------
fbuf: file-like object
File-like object containing data to read.
integrated: boolean, default False
Whether to return radiation parameters as integrated values (Wh/m^2)
or as average irradiance values (W/m^2) (pvlib preferred units)
label: {'right', 'left'}, default: None
Which bin edge label to label time-step with. The default is 'left' for
all time steps except for '1M' which has a default of 'right'.
map_variables: bool, default: True
When true, renames columns of the Dataframe to pvlib variable names
where applicable. See variable CAMS_VARIABLE_MAP.
Returns
-------
data: pandas.DataFrame
Timeseries data from CAMS Radiation or McClear
metadata: dict
Metadata available in the file.
See Also
--------
pvlib.iotools.read_cams, pvlib.iotools.get_cams
References
----------
.. [1] `CAMS Radiation Service Info
<http://www.soda-pro.com/web-services/radiation/cams-radiation-service/info>`_
.. [2] `CAMS McClear Service Info
<http://www.soda-pro.com/web-services/radiation/cams-mcclear/info>`_
"""
metadata = {}
# Initial lines starting with # contain metadata
while True:
line = fbuf.readline().rstrip('\n')
if line.startswith('# Observation period'):
# The last line of the metadata section contains the column names
names = line.lstrip('# ').split(';')
break # End of metadata section has been reached
elif ': ' in line:
metadata[line.split(': ')[0].lstrip('# ')] = line.split(': ')[1]
# Convert latitude, longitude, and altitude values from strings to floats
for k_old in list(metadata.keys()):
k_new = k_old.lstrip().split(' ')[0].lower()
if k_new in ['latitude', 'longitude', 'altitude']:
metadata[k_new] = float(metadata.pop(k_old))
metadata['radiation_unit'] = \
{True: 'Wh/m^2', False: 'W/m^2'}[integrated]
# Determine the time_step from the metadata dictionary
time_step = SUMMATION_PERIOD_TO_TIME_STEP[
metadata['Summarization (integration) period']]
metadata['time_step'] = time_step
data = pd.read_csv(fbuf, sep=';', comment='#', header=None, names=names)
obs_period = data['Observation period'].str.split('/')
# Set index as the start observation time (left) and localize to UTC
if (label == 'left') | ((label is None) & (time_step != '1M')):
data.index = pd.to_datetime(obs_period.str[0], utc=True)
# Set index as the stop observation time (right) and localize to UTC
# default label for monthly data is 'right' following Pandas' convention
elif (label == 'right') | ((label is None) & (time_step == '1M')):
data.index = pd.to_datetime(obs_period.str[1], utc=True)
# For time_steps '1d' and '1M', drop timezone and round to nearest midnight
if (time_step == '1d') | (time_step == '1M'):
data.index = pd.DatetimeIndex(data.index.date)
# For monthly data with 'right' label, the index should be the last
# date of the month and not the first date of the following month
if (time_step == '1M') & (label != 'left'):
data.index = data.index - | pd.Timedelta(days=1) | pandas.Timedelta |
import pandas as pd
from datetime import datetime
import finnhub
import numpy as np
import matplotlib
import matplotlib.pyplot as plot
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import activations
from tensorflow.keras.callbacks import *
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dropout
import matplotlib.pyplot as plt
import mplfinance as mpf
from tensorflow.keras.models import load_model
from datetime import datetime
import time
import os
import base64
from io import BytesIO
# FINNHUB_KEY = os.environ['FINNHUB']
class MakePrediction:
def __init__(self, ticker):
self.ticker = ticker
# self.client = finnhub.Client(api_key=FINNHUB_KEY)
self.client = finnhub.Client(api_key='<KEY>')
def get_candlestick_data(self, ticker, timeframe, start, end):
'''
        Makes a call to the Finnhub API and returns the processed response as a dataframe
'''
data = self.client.stock_candles(ticker, timeframe, start, end)
del data['s']
df = pd.DataFrame.from_dict(data)
df['t'] = df['t'].apply(lambda x: datetime.fromtimestamp(x))
df = df.rename(columns={'c': 'Close', 'h': 'High', 'l': 'Low', 'o': 'Open', 't': 'Date', 'v': 'Volume'})
df.set_index('Date', inplace=True)
df = df[['Open', 'High', 'Low', 'Close', 'Volume']]
return df
def stock_data_253(self):
'''
Uses get_candlestick_data to get candlestick data for a given stock and returns the df with the most recent 253 rows
'''
current_time = int(time.time())
prev_time = current_time - 46656000
df = self.get_candlestick_data(self.ticker, 'D', prev_time, current_time)
df = df.iloc[len(df)-253:]
return df
def get_prediction(self):
'''
Uses other helper functions and returns a nested array of 7 days of stock predictions. Each day is an array with 5 price points
'''
data = self.stock_data_253()
open_data = data.iloc[:, 0:5].to_numpy()
scaler = MinMaxScaler(feature_range = (0, 1))
open_data = scaler.fit_transform(data)
x_test = [open_data[0:253]]
x_test = np.asarray(x_test)
model = load_model('bull_and_bear/minimize_size_weights.hdf5')
stock_prediction = model.predict(x_test)
stock_prediction = scaler.inverse_transform(stock_prediction.reshape(-1, stock_prediction.shape[-1])).reshape(stock_prediction.shape)
return stock_prediction[0]
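    # Hedged example (ticker is arbitrary; requires a valid Finnhub API key and the
    # saved model file 'bull_and_bear/minimize_size_weights.hdf5'):
    # predictor = MakePrediction('AAPL')
    # next_week = predictor.get_prediction()  # 7 arrays of [Open, High, Low, Close, Volume]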
def get_prediction_df(self):
'''
        Post-processes the get_prediction results, puts them into a df with the correct date index and column names, appends them to two months of historical data, and returns the appended df
'''
stock_prediction = self.get_prediction()
prediction_df = [i for i in stock_prediction]
date_index = []
for i in range(7):
date_index.append(datetime.fromtimestamp((i*86400) + int(time.time())))
prediction_df = | pd.DataFrame(data=prediction_df, index=date_index) | pandas.DataFrame |
import pandas as pd
import numpy as np
fn = '2019_survey/2019 Kubernetes Contributor Experience Survey PUBLIC.csv'
contribute_header = "What areas of Kubernetes do you contribute to? Please check all that apply."
blockers_header = "Please rate any challenges to the listed steps of the contribution process"
agree_header = "Do you agree with the following statements (1 - strongly disagree, 5 - strongly agree):"
attend_header = "Which of the below would make you likely to attend more of the Community Meetings? Check all that apply."
most_important_proj_header = "Some of the major projects SIG Contributor Experience is working on are listed below, rank the ones that are most important to you (and/or your SIG)"
use_freq_header = "Of our various communications channels, please rate which ones you use and/or check most frequently on a 1-5 scale, where 1 is “never”, 3 is “several times a month” and 5 is “every day”."
news_header = "Which of these channels is most likely to reach you first for news about decisions, changes, additions, and/or announcements to the contributor process or community matters?"
def map_blocker_and_usefreq_vals(val):
try:
return int(val)
except ValueError:
return int(val[0])
def process_header(df):
columns = list(df.columns)
new_columns = [None]*len(columns)
for i, col in enumerate(columns):
if col[1].startswith("Unnamed") or col[1] == "Response":
new_columns[i] = col[0]
continue
# Find the starting column for the multilabel responses (checkboxes)
# that were also in the 2018 survey
if col[0] == blockers_header:
blockers_i = i
elif col[0] == contribute_header:
contribute_i = i
elif col[0] == news_header:
news_i = i
elif col[0] == use_freq_header:
use_freq_i = i
elif col[0] == most_important_proj_header:
most_important_proj_i = i
elif col[0] == agree_header: # Starting columns for multilabel responses that weren't in the 2018 survey.
agree_i = i
elif col[0] == attend_header:
attend_i = i
#elif col[0] == unattendance_header:
# unattendance_i = i
else: # Handle open ended responses
new_columns[i] = col[0]
def prefix_cols(header, header_i, prefix):
i = header_i
while i < len(columns) and (columns[i][0].startswith("Unnamed") or columns[i][0] == header):
new_columns[i] = "{} {}".format(prefix, columns[i][1])
i += 1
prefix_cols(contribute_header, contribute_i, "Contribute:")
prefix_cols(blockers_header, blockers_i, "Blocker:")
prefix_cols(news_header, news_i, "Check for news:")
prefix_cols(use_freq_header, use_freq_i, "Use freq:")
prefix_cols(most_important_proj_header, most_important_proj_i, "Most Important Project:")
prefix_cols(agree_header, agree_i, "Agree:")
prefix_cols(attend_header, attend_i, "Would attend if:")
df.columns = new_columns
def get_df(file_name=None):
fn = '2019_survey/2019 Kubernetes Contributor Experience Survey PUBLIC.csv'
if file_name:
fn = file_name
df = pd.read_csv(fn, header=[0,1], skipinitialspace=True)
process_header(df)
df = df.rename(columns={
"How long have you been contributing to Kubernetes?": "Contributing_Length",
"What level of the Contributor Ladder do you consider yourself to be on? (pick the highest if you are in multiple OWNERs files)": "Level_of_Contributor",
"What level of the Contributor Ladder do you consider yourself to be on? (pick the highest if you are in multiple OWNERs files)": "Level_of_Contributor",
"What region of the world are you in?": "World_Region",
"Are you interested in advancing to the next level of the Contributor Ladder?": "Interested_in_next_level",
"How many other open source projects not in the Kubernetes ecosystem do you contribute to? (example: nodejs, debian)":"Contribute_to_other_OSS",
"Does your employer support your contributions to Kubernetes?":"Upstream_supported_at_employer",
"Blocker: Other (please specify)": "Other blockers (please specify)",
"What region of the world are you in?": "World Region",
})
def map_blocker_and_usefreq_vals(val):
try:
return int(val)
except ValueError:
return int(val[0])
#Clean Data
for x in df.columns:
if x.startswith("Useful:"):
df = df.assign(**{x: df[x].fillna(0)})
if x.startswith("Contribute:") or x.startswith("Check for news:") or x.startswith("Attended:") or x.startswith("Attending:") or x.startswith("Would attend if:"):
df = df.assign(**{x: np.where(df[x].isna(),0,1)})
if x.startswith('Upstream'):
df = df.assign(**{x: df[x].fillna("Didn't Answer")})
if x.startswith("Blocker:") and x != "Blocker: Other (please specify)":
df[x] = df[x].map(map_blocker_and_usefreq_vals, na_action="ignore")
if x.startswith("Use freq:") or x.startswith("Agree:"):
df[x] = df[x].map(map_blocker_and_usefreq_vals, na_action="ignore")
df = df.rename(columns= {x:x.replace(" ","_").replace("?", "").replace('Most_Important_Project','Most_Important_Proj').replace('Most_Important_Prj','Most_Important_Proj') for x in df.columns})
x = | pd.to_datetime(df.End_Date) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 13:17:47 2018
@author: JHodges
"""
import numpy as np
import matplotlib
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
#matplotlib.rcParams['text.usetex'] = True
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
from scipy.ndimage.interpolation import zoom
import pandas as pd
import util_common as uc
import os
class FuelModel(object):
''' This class contains a fuel model for wildfire spread.
Fields:
Functions:
'''
__slots__ = ['id','code','name',
'fuelBedDepth','moistureOfExtinctionDeadFuel','heatOfCombustionDeadFuel','heatOfCombustionLiveFuel',
'fuelLoad1Hour','fuelLoad10Hour','fuelLoad100Hour','fuelLoadLiveHerb','fuelLoadLiveWood',
'savr1HourFuel','savrLiveHerb','savrLiveWood',
'isDynamic','isReserved']
def __init__(self,idNumber,idCode,idName,
fuelBedDepth,moistureOfExtinctionDeadFuel,heatOfCombustionDeadFuel,heatOfCombustionLiveFuel,
fuelLoad1Hour,fuelLoad10Hour,fuelLoad100Hour,fuelLoadLiveHerb,fuelLoadLiveWood,
savr1HourFuel,savrLiveHerb,savrLiveWood,
isDynamic, isReserved):
self.id = idNumber
self.code = idCode
self.name = idName
self.fuelBedDepth = fuelBedDepth
self.moistureOfExtinctionDeadFuel = moistureOfExtinctionDeadFuel
self.heatOfCombustionDeadFuel = heatOfCombustionDeadFuel
self.heatOfCombustionLiveFuel = heatOfCombustionLiveFuel
self.fuelLoad1Hour = fuelLoad1Hour
self.fuelLoad10Hour = fuelLoad10Hour
self.fuelLoad100Hour = fuelLoad100Hour
self.fuelLoadLiveHerb = fuelLoadLiveHerb
self.fuelLoadLiveWood = fuelLoadLiveWood
self.savr1HourFuel = savr1HourFuel
self.savrLiveHerb = savrLiveHerb
self.savrLiveWood = savrLiveWood
self.isDynamic = isDynamic
self.isReserved = isReserved
def __str__(self):
''' This function prints summary information of the object when a
string is requested.
'''
string = "Fuel Model\n"
string = string + "\tID:\t\t%s\n"%(str(self.id))
string = string + "\tCode:\t%s\n"%(str(self.code))
string = string + "\tName:\t%s\n"%(str(self.name))
return string
def __repr__(self):
''' This function prints summary information of the object when a
string is requested.
'''
return self.__str__()
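# Hedged example of a custom (non-reserved) fuel model; the argument order follows
# __init__ above and the numbers simply mirror FM1 for illustration:
# custom = FuelModel(14, "CU14", "Custom short grass",
#                    1.0, 0.12, 8000, 8000,
#                    0.034, 0, 0, 0, 0,
#                    3500, 1500, 1500,
#                    False, False)
# print(custom)  # ID/code/name summary from __str__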
class lifeStateIntermediate(object):
__slots__ = ['dead','live']
def __init__(self,fuelModel,moistureDead,moistureLive):
savrDead, savrLive = getSAV(fuelModel)
deadFraction, liveFraction, deadFractionTotal, liveFractionTotal = getDLFraction(fuelModel,moistureLive)
loadDead, loadLive = getFuelLoad(fuelModel,moistureLive)
heatCDead, heatCLive = getHeatOfCombustion(fuelModel)
heatDead = np.zeros((len(savrDead),))+heatCDead
heatLive = np.zeros((len(savrLive),))+heatCLive
heatLive[liveFraction == 0] = 0
moistureLive.extend([0,0])
self.dead = self.calculateIntermediates(fuelModel,savrDead,loadDead,deadFraction,heatDead,moistureDead)
self.live = self.calculateIntermediates(fuelModel,savrLive,loadLive,liveFraction,heatLive,moistureLive)
def calculateIntermediates(self,fuelModel,savr,load,fraction,heat,moisture):
totalSilicaContent = 0.0555 # Rothermel 1972
silicaEffective = np.zeros((len(savr),))+0.01 # From behavePlus source, should be 0 if no fuel
wn = np.zeros((len(savr),))
weightedHeat = 0.0
weightedSilica = 0.0
weightedMoisture = 0.0
weightedSavr = 0.0
totalLoadForLifeState = 0.0
"""
for i in range(0,len(moisture)):
wn[i] = load[i]*(1.0-totalSilicaContent)
weightedHeat = weightedHeat + fraction[i] * heat[i]
weightedSilica = weightedSilica + fraction[i] * silicaEffective
weightedMoisture = weightedMoisture + fraction[i]*moisture[i]
weightedSavr = weightedSavr + fraction[i] * savr[i]
totalLoadForLifeState = totalLoadForLifeState + load[i]
"""
#print(fraction,moisture)
wn = [x*(1.0-totalSilicaContent) for x in load] #wn[i] = load[i]*(1.0-totalSilicaContent)
weightedHeat = np.dot(fraction,heat) #weightedHeat = weightedHeat + fraction[i] * heat[i]
weightedSilica = np.dot(fraction,silicaEffective) #weightedSilica = weightedSilica + fraction[i] * silicaEffective
weightedMoisture = np.dot(fraction,moisture) #weightedMoisture = weightedMoisture + fraction[i]*moisture[i]
weightedSavr = np.dot(fraction,savr) #weightedSavr = weightedSavr + fraction[i] * savr[i]
totalLoadForLifeState = np.sum(load) #totalLoadForLifeState = totalLoadForLifeState + load[i]
if fuelModel.isDynamic and False:
weightedFuelLoad = np.sum(wn) # This gives better agreement with
# behavePlus for dynamic fuel models;
# however, the source code for
# BehavePlus shows this should be
# weightedFuelLoad=np.dot(fraction,wn)
else:
weightedFuelLoad = np.dot(wn,fraction)
return [weightedHeat,weightedSilica,weightedMoisture,weightedSavr,totalLoadForLifeState,weightedFuelLoad]
def buildFuelModels(allowDynamicModels=True,allowNonBurningModels=False):
"""
fuelModelNumber, code, name
fuelBedDepth, moistureOfExtinctionDeadFuel, heatOfCombustionDeadFuel, heatOfCombustionLiveFuel,
fuelLoad1Hour, fuelLoad10Hour, fuelLoad100Hour, fuelLoadLiveHerb, fuelLoadLiveWood,
savr1HourFuel, savrLiveHerb, savrLiveWood,
isDynamic, isReserved
- WMC 10/2015
"""
fuelModels = dict()
# Code FMx: Original 13 Fuel Models
fuelModels["FM1"] = FuelModel(
1, "FM1", "Short grass [1]",
1.0, 0.12, 8000, 8000,
0.034, 0, 0, 0, 0,
3500, 1500, 1500,
False, True)
fuelModels["FM2"] = FuelModel(
2, "FM2", "Timber grass and understory [2]",
1.0, 0.15, 8000, 8000,
0.092, 0.046, 0.023, 0.023,
0,3000, 1500, 1500,
False, True)
fuelModels["FM3"] = FuelModel(
3, "FM3", "Tall grass [3]",
2.5, 0.25, 8000, 8000,
0.138, 0, 0, 0, 0,
1500, 1500, 1500,
False, True)
fuelModels["FM4"] = FuelModel(
4, "FM4", "Chaparral [4]",
6.0, 0.2, 8000, 8000,
0.230, 0.184, 0.092, 0, 0.230,
2000, 1500, 1500,
False, True)
fuelModels["FM5"] = FuelModel(
5, "FM5", "Brush [5]",
2.0, 0.20, 8000, 8000,
0.046, 0.023, 0, 0, 0.092,
2000, 1500, 1500,
False, True)
fuelModels["FM6"] = FuelModel(
6, "FM6", "Dormant brush, hardwood slash [6]",
2.5, 0.25, 8000, 8000,
0.069, 0.115, 0.092, 0, 0,
1750, 1500, 1500,
False, True)
fuelModels["FM7"] = FuelModel(
7, "FM7", "Southern rough [7]",
2.5, 0.40, 8000, 8000,
0.052, 0.086, 0.069, 0, 0.017,
1750, 1500, 1500,
False, True)
fuelModels["FM8"] = FuelModel(
8, "FM8", "Short needle litter [8]",
0.2, 0.3, 8000, 8000,
0.069, 0.046, 0.115, 0, 0,
2000, 1500, 1500,
False, True)
fuelModels["FM9"] = FuelModel(
9, "FM9", "Long needle or hardwood litter [9]",
0.2, 0.25, 8000, 8000,
0.134, 0.019, 0.007, 0, 0,
2500, 1500, 1500,
False, True)
fuelModels["FM10"] = FuelModel(
10, "FM10", "Timber litter & understory [10]",
1.0, 0.25, 8000, 8000,
0.138, 0.092, 0.230, 0, 0.092,
2000, 1500, 1500,
False, True)
fuelModels["FM11"] = FuelModel(
11, "FM11", "Light logging slash [11]",
1.0, 0.15, 8000, 8000,
0.069, 0.207, 0.253, 0, 0,
1500, 1500, 1500,
False, True)
fuelModels["FM12"] = FuelModel(
12, "FM12", "Medium logging slash [12]",
2.3, 0.20, 8000, 8000,
0.184, 0.644, 0.759, 0, 0,
1500, 1500, 1500,
False, True)
fuelModels["FM13"] = FuelModel(
13, "FM13", "Heavy logging slash [13]",
3.0, 0.25, 8000, 8000,
0.322, 1.058, 1.288, 0, 0,
1500, 1500, 1500,
False, True)
    if not allowDynamicModels:
        return fuelModels
# 14-89 Available for custom models
if allowNonBurningModels:
# Code NBx: Non-burnable
# 90 Available for custom NB model
fuelModels["NB1"] = FuelModel(
91, "NB1", "Urban, developed [91]",
1.0, 0.10, 8000, 8000,
0, 0, 0, 0, 0,
1500, 1500, 1500,
False, True)
fuelModels["NB2"] = FuelModel(
92, "NB2", "Snow, ice [92]",
1.0, 0.10, 8000, 8000,
0, 0, 0, 0, 0,
1500, 1500, 1500,
False, True)
fuelModels["NB3"] = FuelModel(
93, "NB3", "Agricultural [93]",
1.0, 0.10, 8000, 8000,
0, 0, 0, 0, 0,
1500, 1500, 1500,
False, True)
# Indices 94-95 Reserved for future standard non-burnable models
fuelModels["NB4"] = FuelModel(
94, "NB4", "Future standard non-burnable [94]",
1.0, 0.10, 8000, 8000,
0, 0, 0, 0, 0,
1500, 1500, 1500,
False, True)
fuelModels["NB5"] = FuelModel(
95, "NB5", "Future standard non-burnable [95]",
1.0, 0.10, 8000, 8000,
0, 0, 0, 0, 0,
1500, 1500, 1500,
False, True)
# Indices 96-97 Available for custom NB model
fuelModels["NB8"] = FuelModel(
98, "NB8", "Open water [98]",
1.0, 0.10, 8000, 8000,
0, 0, 0, 0, 0,
1500, 1500, 1500,
False, True)
fuelModels["NB9"] = FuelModel(
99, "NB9", "Bare ground [99]",
1.0, 0.10, 8000, 8000,
0, 0, 0, 0, 0,
1500, 1500, 1500,
False, True)
# Code GRx: Grass
# Index 100 Available for custom GR model
f = 2000.0 / 43560.0
fuelModels["GR1"] = FuelModel(
101, "GR1", "Short, sparse, dry climate grass (D)",
0.4, 0.15, 8000, 8000,
0.10*f, 0, 0, 0.30*f, 0,
2200, 2000, 1500,
True, True)
fuelModels["GR2"] = FuelModel(
102, "GR2", "Low load, dry climate grass (D)",
1.0, 0.15, 8000, 8000,
0.10*f, 0, 0, 1.0*f, 0,
2000, 1800, 1500,
True, True)
fuelModels["GR3"] = FuelModel(
103, "GR3", "Low load, very coarse, humid climate grass (D)",
2.0, 0.30, 8000, 8000,
0.10*f, 0.40*f, 0, 1.50*f, 0,
1500, 1300, 1500,
True, True)
fuelModels["GR4"] = FuelModel(
104, "GR4", "Moderate load, dry climate grass (D)",
2.0, 0.15, 8000, 8000,
0.25*f, 0, 0, 1.9*f, 0,
2000, 1800, 1500,
True, True)
fuelModels["GR5"] = FuelModel(
105, "GR5", "Low load, humid climate grass (D)",
1.5, 0.40, 8000, 8000,
0.40*f, 0.0, 0.0, 2.50*f, 0.0,
1800, 1600, 1500,
True, True)
fuelModels["GR6"] = FuelModel(
106, "GR6", "Moderate load, humid climate grass (D)",
1.5, 0.40, 9000, 9000,
0.10*f, 0, 0, 3.4*f, 0,
2200, 2000, 1500,
True, True)
fuelModels["GR7"] = FuelModel(
107, "GR7", "High load, dry climate grass (D)",
3.0, 0.15, 8000, 8000,
1.0*f, 0, 0, 5.4*f, 0,
2000, 1800, 1500,
True, True)
fuelModels["GR8"] = FuelModel(
108, "GR8", "High load, very coarse, humid climate grass (D)",
4.0, 0.30, 8000, 8000,
0.5*f, 1.0*f, 0, 7.3*f, 0,
1500, 1300, 1500,
True, True)
fuelModels["GR9"] = FuelModel(
109, "GR9", "Very high load, humid climate grass (D)",
5.0, 0.40, 8000, 8000,
1.0*f, 1.0*f, 0, 9.0*f, 0,
1800, 1600, 1500,
True, True)
# 110-112 are reserved for future standard grass models
# 113-119 are available for custom grass models
# Code GSx: Grass and shrub
# 120 available for custom grass and shrub model
fuelModels["GS1"] = FuelModel(
121, "GS1", "Low load, dry climate grass-shrub (D)",
0.9, 0.15, 8000, 8000,
0.2*f, 0, 0, 0.5*f, 0.65*f,
2000, 1800, 1800,
True, True)
fuelModels["GS2"] = FuelModel(
122, "GS2", "Moderate load, dry climate grass-shrub (D)",
1.5, 0.15, 8000, 8000,
0.5*f, 0.5*f, 0, 0.6*f, 1.0*f,
2000, 1800, 1800,
True, True)
fuelModels["GS3"] = FuelModel(
123, "GS3", "Moderate load, humid climate grass-shrub (D)",
1.8, 0.40, 8000, 8000,
0.3*f, 0.25*f, 0, 1.45*f, 1.25*f,
1800, 1600, 1600,
True, True)
fuelModels["GS4"] = FuelModel(
124, "GS4", "High load, humid climate grass-shrub (D)",
2.1, 0.40, 8000, 8000,
1.9*f, 0.3*f, 0.1*f, 3.4*f, 7.1*f,
1800, 1600, 1600,
True, True)
# 125-130 reserved for future standard grass and shrub models
# 131-139 available for custom grass and shrub models
# Shrub
# 140 available for custom shrub model
fuelModels["SH1"] = FuelModel(
141, "SH1", "Low load, dry climate shrub (D)",
1.0, 0.15, 8000, 8000,
0.25*f, 0.25*f, 0, 0.15*f, 1.3*f,
2000, 1800, 1600,
True, True)
fuelModels["SH2"] = FuelModel(
142, "SH2", "Moderate load, dry climate shrub (S)",
1.0, 0.15, 8000, 8000,
1.35*f, 2.4*f, 0.75*f, 0, 3.85*f,
2000, 1800, 1600,
True, True)
fuelModels["SH3"] = FuelModel(
143, "SH3", "Moderate load, humid climate shrub (S)",
2.4, 0.40, 8000., 8000.,
0.45*f, 3.0*f, 0, 0, 6.2*f,
1600, 1800, 1400,
True, True)
fuelModels["SH4"] = FuelModel(
144, "SH4", "Low load, humid climate timber-shrub (S)",
3.0, 0.30, 8000, 8000,
0.85*f, 1.15*f, 0.2*f, 0, 2.55*f,
2000, 1800, 1600,
True, True)
fuelModels["SH5"] = FuelModel(
145, "SH5", "High load, dry climate shrub (S)",
6.0, 0.15, 8000, 8000,
3.6*f, 2.1*f, 0, 0, 2.9*f,
750, 1800, 1600,
True, True)
fuelModels["SH6"] = FuelModel(
146, "SH6", "Low load, humid climate shrub (S)",
2.0, 0.30, 8000, 8000,
2.9*f, 1.45*f, 0, 0, 1.4*f,
750, 1800, 1600,
True, True)
fuelModels["SH7"] = FuelModel(
147, "SH7", "Very high load, dry climate shrub (S)",
6.0, 0.15, 8000, 8000,
3.5*f, 5.3*f, 2.2*f, 0, 3.4*f,
750, 1800, 1600,
True, True)
fuelModels["SH8"] = FuelModel(
148, "SH8", "High load, humid climate shrub (S)",
3.0, 0.40, 8000, 8000,
2.05*f, 3.4*f, 0.85*f, 0, 4.35*f,
750, 1800, 1600,
True, True)
fuelModels["SH9"] = FuelModel(
149, "SH9", "Very high load, humid climate shrub (D)",
4.4, 0.40, 8000, 8000,
4.5*f, 2.45*f, 0, 1.55*f, 7.0*f,
750, 1800, 1500,
True, True)
# 150-152 reserved for future standard shrub models
# 153-159 available for custom shrub models
# Timber and understory
# 160 available for custom timber and understory model
fuelModels["TU1"] = FuelModel(
161, "TU1", "Light load, dry climate timber-grass-shrub (D)",
0.6, 0.20, 8000, 8000,
0.2*f, 0.9*f, 1.5*f, 0.2*f, 0.9*f,
2000, 1800, 1600,
True, True)
fuelModels["TU2"] = FuelModel(
162, "TU2", "Moderate load, humid climate timber-shrub (S)",
1.0, 0.30, 8000, 8000,
0.95*f, 1.8*f, 1.25*f, 0, 0.2*f,
2000, 1800, 1600,
True, True)
fuelModels["TU3"] = FuelModel(
163, "TU3", "Moderate load, humid climate timber-grass-shrub (D)",
1.3, 0.30, 8000, 8000,
1.1*f, 0.15*f, 0.25*f, 0.65*f, 1.1*f,
1800, 1600, 1400,
True, True)
fuelModels["TU4"] = FuelModel(
164, "TU4", "Dwarf conifer understory (S)",
0.5, 0.12, 8000, 8000,
4.5*f, 0, 0, 0, 2.0*f,
2300, 1800, 2000,
True, True)
fuelModels["TU5"] = FuelModel(
165, "TU5", "Very high load, dry climate timber-shrub (S)",
1.0, 0.25, 8000, 8000,
4.0*f, 4.0*f, 3.0*f, 0, 3.0*f,
1500, 1800, 750,
True, True)
# 166-170 reserved for future standard timber and understory models
# 171-179 available for custom timber and understory models
# Timber and litter
# 180 available for custom timber and litter models
fuelModels["TL1"] = FuelModel(
181, "TL1", "Low load, compact conifer litter (S)",
0.2, 0.30, 8000, 8000,
1.0*f, 2.2*f, 3.6*f, 0, 0,
2000, 1800, 1600,
True, True)
fuelModels["TL2"] = FuelModel(
182, "TL2", "Low load broadleaf litter (S)",
0.2, 0.25, 8000, 8000,
1.4*f, 2.3*f, 2.2*f, 0, 0,
2000, 1800, 1600,
True, True)
fuelModels["TL3"] = FuelModel(
183, "TL3", "Moderate load conifer litter (S)",
0.3, 0.20, 8000, 8000,
0.5*f, 2.2*f, 2.8*f, 0, 0,
2000, 1800, 1600,
True, True)
fuelModels["TL4"] = FuelModel(
184, "TL4", "Small downed logs (S)",
0.4, 0.25, 8000, 8000,
0.5*f, 1.5*f, 4.2*f, 0, 0,
2000, 1800, 1600,
True, True)
fuelModels["TL5"] = FuelModel(
185, "TL5", "High load conifer litter (S)",
0.6, 0.25, 8000, 8000,
1.15*f, 2.5*f, 4.4*f, 0, 0,
2000, 1800, 160,
True, True)
fuelModels["TL6"] = FuelModel(
186, "TL6", "High load broadleaf litter (S)",
0.3, 0.25, 8000, 8000,
2.4*f, 1.2*f, 1.2*f, 0, 0,
2000, 1800, 1600,
True, True)
fuelModels["TL7"] = FuelModel(
187, "TL7", "Large downed logs (S)",
0.4, 0.25, 8000, 8000,
0.3*f, 1.4*f, 8.1*f, 0, 0,
2000, 1800, 1600,
True, True)
fuelModels["TL8"] = FuelModel(
188, "TL8", "Long-needle litter (S)",
0.3, 0.35, 8000, 8000,
5.8*f, 1.4*f, 1.1*f, 0, 0,
1800, 1800, 1600,
True, True)
fuelModels["TL9"] = FuelModel(
189, "TL9", "Very high load broadleaf litter (S)",
0.6, 0.35, 8000, 8000,
6.65*f, 3.30*f, 4.15*f, 0, 0,
1800, 1800, 1600,
True, True)
# 190-192 reserved for future standard timber and litter models
# 193-199 available for custom timber and litter models
# Slash and blowdown
# 200 available for custom slash and blowdown model
fuelModels["SB1"] = FuelModel(
201, "SB1", "Low load activity fuel (S)",
1.0, 0.25, 8000, 8000,
1.5*f, 3.0*f, 11.0*f, 0, 0,
2000, 1800, 1600,
True, True)
fuelModels["SB2"] = FuelModel(
202, "SB2", "Moderate load activity or low load blowdown (S)",
1.0, 0.25, 8000, 8000,
4.5*f, 4.25*f, 4.0*f, 0, 0,
2000, 1800, 1600,
True, True)
fuelModels["SB3"] = FuelModel(
203, "SB3", "High load activity fuel or moderate load blowdown (S)",
1.2, 0.25, 8000, 8000,
5.5*f, 2.75*f, 3.0*f, 0, 0,
2000, 1800, 1600,
True, True)
fuelModels["SB4"] = FuelModel(
204, "SB4", "High load blowdown (S)",
2.7, 0.25, 8000, 8000,
5.25*f, 3.5*f, 5.25*f, 0, 0,
2000, 1800, 1600,
True, True)
return fuelModels
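# Illustrative sketch only: how to look up one of the models defined above and
# read a few of its fields. The helper name _example_fuel_model_lookup is ours
# (not part of the original API) and is never called at import time; the
# commented values are the ones hard-coded for FM1 in buildFuelModels.
def _example_fuel_model_lookup():
    models = buildFuelModels(allowDynamicModels=True, allowNonBurningModels=False)
    fm1 = models['FM1']            # "Short grass [1]"
    depth = fm1.fuelBedDepth       # 1.0 ft fuel bed depth
    load1h = fm1.fuelLoad1Hour     # 0.034 lb/ft^2 1-hour fuel load
    return depth, load1h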
def buildFuelModelsIdx():
fuelModels = np.empty((256,),dtype=object)
fuelModels[1] = 'FM1'
fuelModels[2] = 'FM2'
fuelModels[3] = 'FM3'
fuelModels[4] = 'FM4'
fuelModels[5] = 'FM5'
fuelModels[6] = 'FM6'
fuelModels[7] = 'FM7'
fuelModels[8] = 'FM8'
fuelModels[9] = 'FM9'
fuelModels[10] = 'FM10'
fuelModels[11] = 'FM11'
fuelModels[12] = 'FM12'
fuelModels[13] = 'FM13'
# 14-89 Available for custom models
fuelModels[91] = 'NB1'
fuelModels[92] = 'NB2'
fuelModels[93] = 'NB3'
# Indices 94-95 Reserved for future standard non-burnable models
# Indices 96-97 Available for custom NB model
fuelModels[98] = 'NB8'
fuelModels[99] = 'NB9'
# Index 100 Available for custom GR model
fuelModels[101] = 'GR1'
fuelModels[102] = 'GR2'
fuelModels[103] = 'GR3'
fuelModels[104] = 'GR4'
fuelModels[105] = 'GR5'
fuelModels[106] = 'GR6'
fuelModels[107] = 'GR7'
fuelModels[108] = 'GR8'
fuelModels[109] = 'GR9'
# 110-112 are reserved for future standard grass models
# 113-119 are available for custom grass models
# 120 available for custom grass and shrub model
fuelModels[121] = 'GS1'
fuelModels[122] = 'GS2'
fuelModels[123] = 'GS3'
fuelModels[124] = 'GS4'
# 125-130 reserved for future standard grass and shrub models
# 131-139 available for custom grass and shrub models
# 140 available for custom shrub model
fuelModels[141] = 'SH1'
fuelModels[142] = 'SH2'
fuelModels[143] = 'SH3'
fuelModels[144] = 'SH4'
fuelModels[145] = 'SH5'
fuelModels[146] = 'SH6'
fuelModels[147] = 'SH7'
fuelModels[148] = 'SH8'
fuelModels[149] = 'SH9'
# 150-152 reserved for future standard shrub models
# 153-159 available for custom shrub models
# 160 available for custom timber and understory model
fuelModels[161] = 'TU1'
fuelModels[162] = 'TU2'
fuelModels[163] = 'TU3'
fuelModels[164] = 'TU4'
fuelModels[165] = 'TU5'
# 166-170 reserved for future standard timber and understory models
# 171-179 available for custom timber and understory models
# 180 available for custom timber and litter models
fuelModels[181] = 'TL1'
fuelModels[182] = 'TL2'
fuelModels[183] = 'TL3'
fuelModels[184] = 'TL4'
fuelModels[185] = 'TL5'
fuelModels[186] = 'TL6'
fuelModels[187] = 'TL7'
fuelModels[188] = 'TL8'
fuelModels[189] = 'TL9'
# 190-192 reserved for future standard timber and litter models
# 193-199 available for custom timber and litter models
# 200 available for custom slash and blowdown model
fuelModels[201] = 'SB1'
fuelModels[202] = 'SB2'
fuelModels[203] = 'SB3'
fuelModels[204] = 'SB4'
return fuelModels
def getFuelModel(fuelModel):
fuelModels = buildFuelModels(allowDynamicModels=True,allowNonBurningModels=True)
return fuelModels[fuelModel]
def getMoistureContent(m1h,m10h,m100h,lhm,lwm):
moistureDead = [m1h,m10h,m100h,m1h]
moistureLive = [lhm,lwm]
moistureDead = [x/100 for x in moistureDead]
moistureLive = [x/100 for x in moistureLive]
return moistureDead, moistureLive
def getSAV(fuelModel):
# In behavePlus, there is a conversion to surfaceAreaToVolumeUnits
savrDead = [fuelModel.savr1HourFuel, 109.0, 30.0, fuelModel.savrLiveHerb]
savrLive = [fuelModel.savrLiveHerb, fuelModel.savrLiveWood, 0.0, 0.0]
return savrDead, savrLive
def getFuelLoad(fuelModel,moistureLive):
loadDead = [fuelModel.fuelLoad1Hour,
fuelModel.fuelLoad10Hour,
fuelModel.fuelLoad100Hour,
0.0]
loadLive = [fuelModel.fuelLoadLiveHerb,
fuelModel.fuelLoadLiveWood,
0.0,
0.0]
#print(loadDead)
#print(loadLive)
if fuelModel.isDynamic:
if moistureLive[0] < 0.30:
loadDead[3] = loadLive[0]
loadLive[0] = 0.0
elif moistureLive[0] <= 1.20:
#print(loadLive[0] * (1.333 - 1.11 * moistureLive[0]))
loadDead[3] = loadLive[0] * (1.333 - 1.11 * moistureLive[0])
#loadDead[3] = loadLive[0] * (1.20 - moistureLive[0])/0.9
loadLive[0] = loadLive[0] - loadDead[3]
#print(loadLive)
#print(loadDead)
#print(loadDead)
#print(loadLive)
return loadDead, loadLive
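# Illustrative sketch only: the dynamic-model branch above transfers cured live
# herbaceous load to the dead herbaceous class. For example, at 60 % live
# herbaceous moisture the transfer fraction is 1.333 - 1.11*0.60 = 0.667, so
# roughly two thirds of the herbaceous load is treated as dead fuel. The helper
# name below is ours and the moisture value is an arbitrary sample input.
def _example_herbaceous_transfer():
    fuelModel = getFuelModel('GR2')                   # a dynamic grass model defined above
    loadDead, loadLive = getFuelLoad(fuelModel, [0.60, 0.60])
    return loadDead[3], loadLive[0]                   # transferred vs. remaining herbaceous load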
def getHeatOfCombustion(fuelModel):
heatOfCombustionDead = fuelModel.heatOfCombustionDeadFuel
heatOfCombustionLive = fuelModel.heatOfCombustionLiveFuel
return heatOfCombustionDead, heatOfCombustionLive
def getDLFraction(fuelModel,moistureLive):
fuelDensity = 32.0 # Rothermel 1972
savrDead, savrLive = getSAV(fuelModel)
loadDead, loadLive = getFuelLoad(fuelModel,moistureLive)
#print(loadDead)
#print(savrDead)
surfaceAreaDead = [x*y/fuelDensity for x,y in zip(loadDead,savrDead)]
surfaceAreaLive = [x*y/fuelDensity for x,y in zip(loadLive,savrLive)]
#print(surfaceAreaDead)
totalSurfaceAreaDead = np.sum(surfaceAreaDead)
totalSurfaceAreaLive = np.sum(surfaceAreaLive)
fractionOfTotalSurfaceAreaDead = totalSurfaceAreaDead/(totalSurfaceAreaDead+totalSurfaceAreaLive)
fractionOfTotalSurfaceAreaLive = 1.0 - fractionOfTotalSurfaceAreaDead
if totalSurfaceAreaDead > 1.0e-7:
deadFraction = [x/totalSurfaceAreaDead for x in surfaceAreaDead]
else:
deadFraction= [0 for x in surfaceAreaDead]
if totalSurfaceAreaLive > 1.0e-7:
liveFraction = [x/totalSurfaceAreaLive for x in surfaceAreaLive]
else:
liveFraction= [0 for x in surfaceAreaLive]
return deadFraction, liveFraction, fractionOfTotalSurfaceAreaDead, fractionOfTotalSurfaceAreaLive
def getMoistOfExt(fuelModel,moistureDead,moistureLive):
loadDead, loadLive = getFuelLoad(fuelModel,moistureLive)
savrDead, savrLive = getSAV(fuelModel)
moistOfExtDead = fuelModel.moistureOfExtinctionDeadFuel
fineDead = 0.0
fineLive = 0.0
fineFuelsWeightingFactor = 0.0
weightedMoistureFineDead = 0.0
fineDeadMoisture = 0.0
fineDeadOverFineLive = 0.0
for i in range(0,len(loadDead)):
if savrDead[i] > 1.0e-7:
fineFuelsWeightingFactor = loadDead[i] * np.exp(-138.0/savrDead[i])
fineDead = fineDead + fineFuelsWeightingFactor
weightedMoistureFineDead = weightedMoistureFineDead + fineFuelsWeightingFactor * moistureDead[i]
if fineDead > 1.0e-7:
fineDeadMoisture = weightedMoistureFineDead / fineDead
for i in range(0,len(loadLive)):
if savrLive[i] > 1.0e-7:
fineLive = fineLive + loadLive[i]*np.exp(-500.0/savrLive[i])
if fineLive > 1.0e-7:
fineDeadOverFineLive = fineDead / fineLive
moistOfExtLive = (2.9 * fineDeadOverFineLive * (1.0 - (fineDeadMoisture) / moistOfExtDead)) - 0.226
#print("MoEL:",moistOfExtLive)
if moistOfExtLive < moistOfExtDead:
moistOfExtLive = moistOfExtDead
return moistOfExtDead, moistOfExtLive
def getCharacteristicSAVR(fuelModel,intermediates,moistureLive):
deadFraction, liveFraction, deadFractionTotal, liveFractionTotal = getDLFraction(fuelModel,moistureLive)
weightedSavrLive = intermediates.live[3]
weightedSavrDead = intermediates.dead[3]
sigma = deadFractionTotal * weightedSavrDead + liveFractionTotal * weightedSavrLive
return sigma
def getPackingRatios(fuelModel,intermediates,moistureLive):
fuelDensity = 32.0 # Rothermel 1972
sigma = getCharacteristicSAVR(fuelModel,intermediates,moistureLive)
totalLoadForLifeStateLive = intermediates.live[4]
totalLoadForLifeStateDead = intermediates.dead[4]
totalLoad = totalLoadForLifeStateLive + totalLoadForLifeStateDead
depth = fuelModel.fuelBedDepth
bulkDensity = totalLoad / depth
packingRatio = totalLoad / (depth * fuelDensity)
sigma = round(sigma,0)
optimumPackingRatio = 3.348 / (sigma**0.8189)
#packingRatio = round(packingRatio,4)
relativePackingRatio = packingRatio / optimumPackingRatio
return packingRatio, relativePackingRatio, bulkDensity
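# Illustrative sketch only: packing ratio is total load / (bed depth * 32 lb/ft^3
# particle density), and the optimum packing ratio is 3.348 / sigma**0.8189
# (Rothermel 1972); for sigma around 2000 ft^-1 the optimum is roughly 0.0066.
# The helper name and the moisture inputs below are arbitrary sample values.
def _example_packing_ratio():
    fuelModel = getFuelModel('FM1')
    moistureDead, moistureLive = getMoistureContent(8, 9, 10, 60, 60)
    intermediates = lifeStateIntermediate(fuelModel, moistureDead, moistureLive)
    packingRatio, relativePackingRatio, bulkDensity = getPackingRatios(
        fuelModel, intermediates, moistureLive)
    return packingRatio, relativePackingRatio, bulkDensity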
def getWeightedFuelLoads(fuelModel,intermediates):
weightedFuelLoadDead = intermediates.dead[5]
weightedFuelLoadLive = intermediates.live[5]
return weightedFuelLoadDead, weightedFuelLoadLive
def getWeightedHeats(fuelModel,intermediates):
weightedHeatDead = intermediates.dead[0]
weightedHeatLive = intermediates.live[0]
return weightedHeatDead, weightedHeatLive
def getWeightedSilicas(fuelModel,intermediates):
weightedSilicaDead = intermediates.dead[1]
weightedSilicaLive = intermediates.live[1]
return weightedSilicaDead, weightedSilicaLive
def getHeatSink(fuelModel,moistureDead,moistureLive,bulkDensity):
savrDead, savrLive = getSAV(fuelModel)
qigDead = np.zeros((len(savrDead),))
qigLive = np.zeros((len(savrLive),))
deadFraction, liveFraction, deadFractionTotal, liveFractionTotal = getDLFraction(fuelModel,moistureLive)
heatSink = 0
for i in range(0,len(savrDead)):
if savrDead[i] > 1.0e-7:
qigDead[i] = 250 + 1116.0 * (moistureDead[i])
heatSink = heatSink + deadFractionTotal*deadFraction[i]*qigDead[i]*np.exp(-138.0/savrDead[i])
if savrLive[i] > 1.0e-7:
qigLive[i] = 250 + 1116.0 * (moistureLive[i])
heatSink = heatSink + liveFractionTotal*liveFraction[i]*qigLive[i]*np.exp(-138.0/savrLive[i])
heatSink = heatSink * bulkDensity
return heatSink
def getHeatFlux(fuelModel,moistureDead,moistureLive,sigma,packingRatio):
if sigma < 1.0e-7:
heatFlux = 0.0
else:
heatFlux = np.exp((0.792 + 0.681 * sigma**0.5)*(packingRatio + 0.1)) / (192 + 0.2595 * sigma)
return heatFlux
def getWeightedMoistures(fuelModel,intermediates):
weightedMoistureDead = intermediates.dead[2]
weightedMoistureLive = intermediates.live[2]
return weightedMoistureDead, weightedMoistureLive
def getEtaM(fuelModel,intermediates,MoED,MoEL):
weightedMoistureDead, weightedMoistureLive = getWeightedMoistures(fuelModel,intermediates)
def calculateEtaM(weightedMoisture,MoE):
relativeMoisture = 0.0
if MoE > 0.0:
relativeMoisture = weightedMoisture / MoE
if weightedMoisture > MoE or relativeMoisture > 1.0:
etaM = 0
else:
etaM = 1.0 - (2.59 * relativeMoisture) + (5.11 * (relativeMoisture**2))-(3.52*(relativeMoisture**3))
return etaM
etaMDead = calculateEtaM(weightedMoistureDead,MoED)
etaMLive = calculateEtaM(weightedMoistureLive,MoEL)
return etaMDead, etaMLive
def getEtaS(fuelModel,intermediates):
weightedSilicaDead, weightedSilicaLive = getWeightedSilicas(fuelModel,intermediates)
def calculateEtaS(weightedSilica):
etaSDen = weightedSilica ** 0.19
if etaSDen < 1e-6:
etaS = 0.0
else:
etaS = 0.174 / etaSDen
return min([etaS,1.0])
etaSDead = calculateEtaS(weightedSilicaDead)
etaSLive = calculateEtaS(weightedSilicaLive)
return etaSDead, etaSLive
def getSurfaceFireReactionIntensity(fuelModel,sigma,relativePackingRatio,MoED,MoEL,intermediates):
aa = 133.0 / (sigma ** 0.7913) # Albini 1976
gammaMax = (sigma ** 1.5) / (495.0+(0.0594*(sigma**1.5)))
gamma = gammaMax * (relativePackingRatio**aa) * np.exp(aa * (1.0-relativePackingRatio))
weightedFuelLoadDead, weightedFuelLoadLive = getWeightedFuelLoads(fuelModel,intermediates)
weightedHeatDead, weightedHeatLive = getWeightedHeats(fuelModel,intermediates)
#MoEL = 1.99
etaMDead, etaMLive = getEtaM(fuelModel,intermediates,MoED,MoEL)
etaSDead, etaSLive = getEtaS(fuelModel,intermediates)
#print("gamma:",gamma)
#print("weightedFuelLoadDead/Live:",weightedFuelLoadDead,weightedFuelLoadLive)
#print("weightedHeatDead/Live:",weightedHeatDead,weightedHeatLive)
#print("etaMDead/Live",etaMDead,etaMLive)
#print("etaSDead/Live",etaSDead,etaSLive)
"""
print("gamma",gamma)
print("weightedFuelLoadDead",weightedFuelLoadDead)
print("weightedHeatDead",weightedHeatDead,weightedHeatLive)
print("etaMDead",etaMDead,etaMLive)
print("etaSDead",etaSDead,etaSLive)
"""
reactionIntensityDead = gamma * weightedFuelLoadDead * weightedHeatDead * etaMDead * etaSDead
reactionIntensityLive = gamma * weightedFuelLoadLive * weightedHeatLive * etaMLive * etaSLive
#reactionIntensityDead = 7505
reactionIntensity = reactionIntensityDead+reactionIntensityLive
#print("Dead Fuel Reaction Intensity: %.0f"%(reactionIntensityDead))
#print("Live Fuel Reaction Intensity: %.0f"%(reactionIntensityLive))
#print("Reaction Intensity: %.0f"%(reactionIntensity))
return reactionIntensity, reactionIntensityDead, reactionIntensityLive
def getNoWindNoSlopeSpreadRate(reactionIntensity,heatFlux,heatSink):
if heatSink < 1.0e-7:
Rstar = 0.0
else:
Rstar = reactionIntensity*heatFlux/heatSink
#print("HeatSource:",reactionIntensity*heatFlux)
#print("NoWindNoSlopeSpredRate:",Rstar)
return Rstar
def convertFtMinToChHr(R):
R = R/1.100
return R
def calculateMidflameWindSpeed(fuelModel,windSpeed,canopyCover,canopyHeight,crownRatio,
windHeightInputMode='TwentyFoot'):
if windHeightInputMode == 'TenMeter':
windSpeed = windSpeed/ 1.15
depth = fuelModel.fuelBedDepth
canopyCrownFraction = crownRatio * canopyCover / 3.0
if canopyCover < 1.0e-7 or canopyCrownFraction < 0.05 or canopyHeight < 6.0:
sheltered = False
else:
sheltered = True
if sheltered:
waf = 0.555 / (((canopyCrownFraction * canopyHeight)**0.5)*np.log((20.0+0.36*canopyHeight) / (0.13 * canopyHeight)))
elif depth > 1.0e-7:
waf = 1.83 / np.log((20.0+0.36 * depth) / (0.13 * depth))
else:
waf = 1.0
midflameWindSpeed = waf * windSpeed
return midflameWindSpeed, waf
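# Illustrative sketch only: for an unsheltered fuel bed the wind adjustment
# factor depends only on bed depth. With depth = 1.0 ft,
# waf = 1.83 / ln((20 + 0.36*1.0) / (0.13*1.0)) ~= 0.36, so a 10 mi/h 20-ft
# wind becomes roughly a 3.6 mi/h midflame wind. Helper name and inputs are
# arbitrary sample values, not from any validation case.
def _example_midflame_wind():
    fuelModel = getFuelModel('FM1')                   # fuelBedDepth = 1.0 ft
    mfWindSpeed, waf = calculateMidflameWindSpeed(fuelModel, windSpeed=10.0,
                                                  canopyCover=0.0, canopyHeight=0.0,
                                                  crownRatio=0.0)
    return mfWindSpeed, waf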
def calculateWindFactor(sigma,relativePackingRatio,mfWindSpeed):
windC, windB, windE = getWindIntermediates(sigma)
mfWindSpeed = mfWindSpeed*88 # Convert mph to ft/min
if mfWindSpeed < 1.0e-7:
phiW = 0.0
else:
phiW = (mfWindSpeed**windB) * windC * (relativePackingRatio**(-windE))
return phiW
def getWindIntermediates(sigma):
windC = 7.47 * np.exp(-0.133 * (sigma**0.55))
windB = 0.02526 * (sigma ** 0.54)
windE = 0.715 * np.exp(-0.000359*sigma)
return windC, windB, windE
def calculateSlopeFactor(slope,packingRatio,isAngle=False,isDegree=True):
if isAngle:
if isDegree:
slope = slope/180.0*3.1415926535
slopex = np.tan(slope)
else:
slopex = slope
phiS = 5.275 * (packingRatio**(-0.3)) * (slopex**2)
return phiS
def calculateROS(Rstar,phiW,phiS):
R = Rstar * (1+phiW+phiS)
return R
def calculateDirectionOfMaxSpread(windDir,aspect,Rstar,phiS,phiW):
correctedWindDir = windDir-aspect
windDirRadians = correctedWindDir * 3.1415926535 / 180.0
slopeRate = Rstar*phiS
windRate = Rstar*phiW
x = slopeRate + (windRate * np.cos(windDirRadians))
y = windRate * np.sin(windDirRadians)
rateVector = ((x**2)+(y**2))**0.5
forwardSpreadRate = Rstar + rateVector
azimuth = np.arctan2(y,x) * 180.0 / 3.1415926535
if azimuth < -1.0e-20:
azimuth = azimuth + 360
azimuth = azimuth + aspect + 180.0
if azimuth >= 360.0:
azimuth = azimuth - 360.0
return azimuth, forwardSpreadRate
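# Illustrative sketch only: the function above adds the slope and wind rate
# vectors. When the wind blows from the aspect direction (correctedWindDir = 0)
# the two contributions are colinear, so the rate vector is slopeRate + windRate
# and the heading is straight up-slope/downwind. Helper name and values are ours.
def _example_direction_of_max_spread():
    Rstar, phiW, phiS = 1.0, 3.0, 4.0      # no-wind/no-slope ROS (ft/min) and factors
    windDir, aspect = 180.0, 180.0         # wind from the same direction as the aspect
    azimuth, forwardSpreadRate = calculateDirectionOfMaxSpread(windDir, aspect, Rstar, phiS, phiW)
    # forwardSpreadRate = Rstar + (slopeRate + windRate) = 1 + (4 + 3) = 8 ft/min
    # azimuth = 0 + aspect + 180 = 360, which wraps to 0 deg
    return azimuth, forwardSpreadRate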
def calculateWindSpeedLimit(reactionIntensity,phiS):
windSpeedLimit = 0.9 * reactionIntensity
    if phiS > 0.0:
        if phiS > windSpeedLimit:
            phiS = windSpeedLimit  # local cap only; the caller's phiS is not modified
return windSpeedLimit
def calculateEffectiveWindSpeed(forwardSpreadRate,Rstar,relativePackingRatio,sigma,windSpeedLimit=9001):
windC, windB, windE = getWindIntermediates(sigma)
phiEffectiveWind = forwardSpreadRate/Rstar - 1.0
effectiveWindSpeed = ((phiEffectiveWind*(relativePackingRatio**windE)) / windC)**(1/windB)
effectiveWindSpeed = effectiveWindSpeed / 88
if effectiveWindSpeed > windSpeedLimit/88:
effectiveWindSpeed = windSpeedLimit/88
return effectiveWindSpeed
def getResidenceTime(sigma):
if sigma < 1.0e-7:
residenceTime = 0.0
else:
residenceTime = 384.0/sigma
return residenceTime
def calculateFireBasicDimensions(effectiveWindSpeed,forwardSpreadRate):
#print("***EFF,",effectiveWindSpeed)#*88/60)
if effectiveWindSpeed > 1.0e-7:
fireLengthToWidthRatio = 1.0 + (0.25 * effectiveWindSpeed)#*88/60)
else:
fireLengthToWidthRatio = 1.0
#print("default fl2wr:",fireLengthToWidthRatio)
#print("default ecc:",(1-(1/fireLengthToWidthRatio)**2)**0.5)
#fireLengthToWidthRatio = 1.174
#fireLengthToWidthRatio = 2.25
#fireLengthToWidthRatio = 1.174 # with effective wind speed 15.7 mi/h
#fireLengthToWidthRatio = 1.161 # with effective wind speed 8.5 mi/h
#fireLengthToWidthRatio = 1.145 # with effective wind speed 5.0 mi/h
x = (fireLengthToWidthRatio**2) - 1.0
if x > 0.0:
eccentricity = (x**0.5) / fireLengthToWidthRatio
#eccentricity = (1-(1/fireLengthToWidthRatio)**2)**0.5
else:
eccentricity = 0.0
#eccentricity = 0.9045
#print("modded fl2wr:",fireLengthToWidthRatio)
#print("modded ecc:",eccentricity)
backingSpreadRate = forwardSpreadRate * (1.0-eccentricity) / (1.0+eccentricity)
ellipticalB = (forwardSpreadRate + backingSpreadRate) / 2.0
ellipticalC = ellipticalB - backingSpreadRate
if fireLengthToWidthRatio > 1e-7:
ellipticalA = ellipticalB / fireLengthToWidthRatio
else:
ellipticalA = 0.0
return fireLengthToWidthRatio, eccentricity, backingSpreadRate, ellipticalA, ellipticalB, ellipticalC
def calculateFireFirelineIntensity(forwardSpreadRate,reactionIntensity,residenceTime):
firelineIntensity = forwardSpreadRate * reactionIntensity * residenceTime / 60.0
return firelineIntensity
def calculateFlameLength(firelineIntensity):
flameLength = max([0.0,0.45*(firelineIntensity**0.46)])
return flameLength
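# Illustrative sketch only: Byram's flame-length relation used above. A fireline
# intensity of 100 Btu/ft/s gives 0.45 * 100**0.46 ~= 3.7 ft of flame. The
# helper name and the input value are ours, purely for illustration.
def _example_flame_length():
    return calculateFlameLength(100.0)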
def calculateSpreadRateAtVector(forwardSpreadRate,eccentricity,dirRmax,dirOfInterest):
if forwardSpreadRate > 0.0:
beta = abs(dirRmax - dirOfInterest)
#print("%.1f,%.1f,%.1f"%(dirRmax,dirOfInterest,beta))
if beta > 180.0:
beta = (360-beta)
betaRad = beta * 3.1415926535/180.0
dirFactor = ((np.cos(betaRad)+1)/2)
# This is the equation according to the BehavePlus source code:
rosVector = forwardSpreadRate * (1.0-eccentricity) / (1.0-eccentricity* np.cos(betaRad))
        # This is the equation I have found to match BehavePlus results:
rosVector = ((1-abs(betaRad)/3.1415926535)*forwardSpreadRate * dirFactor)
# Combining the two smooths out the peak
rosVector = ((1-abs(betaRad)/3.1415926535)*forwardSpreadRate * dirFactor + (abs(betaRad)/3.1415926535)*rosVector)
#eccentricity = 0.9
#rosVector = forwardSpreadRate * (1.0-eccentricity) / (1.0-eccentricity*dirFactor)
#if beta < 30:
#rosVector = ((1-abs(betaRad)/3.1415926535)*forwardSpreadRate * dirFactor + (betaRad/3.1415926535)*rosVector)
#print(dirOfInterest,betaRad,rosVector)
else:
rosVector = 0.0
return rosVector
def calculateSpreadRateAtVector2(forwardSpreadRate,backSpreadRate,eccentricity,dirRmax,dirOfInterest):
if forwardSpreadRate > 0.0:
beta = abs(dirRmax - dirOfInterest)
#print("%.1f,%.1f,%.1f"%(dirRmax,dirOfInterest,beta))
if beta > 180.0:
beta = (360-beta)
if abs(beta) > 0.1:
betaRad = beta * 3.1415926535/180.0
dirFactor = ((np.cos(betaRad)+1)/2)
# This is the equation according to the BehavePlus source code:
rosVector = forwardSpreadRate * (1.0-eccentricity) / (1.0-eccentricity* np.cos(betaRad))
            # This is the equation I have found to match BehavePlus results:
#rosVector = ((1-abs(betaRad)/3.1415926535)*forwardSpreadRate * dirFactor)
#rosVector = ((1-abs(betaRad)/3.1415926535)*(forwardSpreadRate-backSpreadRate) * dirFactor)+backSpreadRate
# Combining the two smooths out the peak
#rosVector = ((1-abs(betaRad)/3.1415926535)*forwardSpreadRate * dirFactor + (abs(betaRad)/3.1415926535)*rosVector)
#eccentricity = 0.9
#rosVector = forwardSpreadRate * (1.0-eccentricity) / (1.0-eccentricity*dirFactor)
#if beta < 30:
#rosVector = ((1-abs(betaRad)/3.1415926535)*forwardSpreadRate * dirFactor + (betaRad/3.1415926535)*rosVector)
#print(dirOfInterest,betaRad,rosVector)
else:
rosVector = forwardSpreadRate
if rosVector < backSpreadRate:
rosVector = backSpreadRate
else:
rosVector = 0.0
return rosVector
def scaleRandomValue(mn,mx):
value = np.random.random()*(mx-mn)+mn
return value
def getRandomConditions(params,allowDynamicModels=True):
paramsRand = dict()
for key in params.keys():
if params[key][0] == None:
minValue = params[key][1]
maxValue = params[key][2]
if key == 'model':
fuelModels = list(buildFuelModels(allowDynamicModels=minValue,allowNonBurningModels=maxValue).keys())
value = fuelModels[np.random.randint(0,len(fuelModels))]
else:
value = scaleRandomValue(minValue,maxValue)
else:
value = params[key][0]
paramsRand[key] = value
return paramsRand
def orderParams(params,toPrint=False):
model = params['model']
canopyCover = params['canopyCover']*100
canopyHeight = params['canopyHeight']
crownRatio = params['crownRatio']
m1h = params['m1h']
m10h = params['m10h']
m100h = params['m100h']
lhm = params['lhm']
lwm = params['lwm']
windSpeed = params['windSpeed']
windDir = params['windDir']
slope = params['slope']*100
aspect = params['aspect']
orderedParams = [model,canopyCover,canopyHeight,crownRatio,m1h,m10h,m100h,lhm,lwm,windSpeed,windDir,slope,aspect]
if toPrint:
print("************************************************************")
print("Starting simulation")
print("model:\t\t\t%s"%(model))
print("canopyCover:\t\t%.2f"%(canopyCover))
print("canopyHeight:\t\t%.2f"%(canopyHeight))
print("crownRatio:\t\t%.2f"%(crownRatio))
print("m1h:\t\t\t%.2f"%(m1h))
print("m10h:\t\t\t%.2f"%(m10h))
print("m100h:\t\t\t%.2f"%(m100h))
print("lhm:\t\t\t%.2f"%(lhm))
print("lwm:\t\t\t%.2f"%(lwm))
print("windSpeed:\t\t%.2f"%(windSpeed))
print("windDir:\t\t%.2f"%(windDir))
print("slope:\t\t\t%.2f"%(slope))
print("aspect:\t\t\t%.2f"%(aspect))
return orderedParams
def getROSfromParams(params,toPrint=False,maxOnly=False):
model = params['model']
canopyCover = params['canopyCover']
canopyHeight = params['canopyHeight']
crownRatio = params['crownRatio']
m1h = params['m1h']
m10h = params['m10h']
m100h = params['m100h']
lhm = params['lhm']
lwm = params['lwm']
windSpeed = params['windSpeed']
windDir = params['windDir']
slope = params['slope']
aspect = params['aspect']
orderParams(params,toPrint=toPrint)
directions = np.linspace(0,360,361)
fuelModel = getFuelModel(model)
moistureDead, moistureLive = getMoistureContent(m1h,m10h,m100h,lhm,lwm)
loadDead, loadLive = getFuelLoad(fuelModel,moistureLive)
savrDead, savrLive = getSAV(fuelModel)
deadFraction, liveFraction, deadFractionTotal, liveFractionTotal = getDLFraction(fuelModel,moistureLive)
if toPrint:
print(deadFraction)
print(liveFraction)
moistOfExtDead, moistOfExtLive = getMoistOfExt(fuelModel,moistureDead,moistureLive)
heatDead, heatLive = getHeatOfCombustion(fuelModel)
intermediates = lifeStateIntermediate(fuelModel,moistureDead,moistureLive)
sigma = getCharacteristicSAVR(fuelModel,intermediates,moistureLive)
packingRatio, relativePackingRatio, bulkDensity = getPackingRatios(fuelModel,intermediates,moistureLive)
heatSink = getHeatSink(fuelModel,moistureDead,moistureLive,bulkDensity)
heatFlux = getHeatFlux(fuelModel,moistureDead,moistureLive,sigma,packingRatio)
reactionIntensity, reactionIntensityDead, reactionIntensityLive = getSurfaceFireReactionIntensity(fuelModel,sigma,relativePackingRatio,moistOfExtDead,moistOfExtLive,intermediates)
Rstar = getNoWindNoSlopeSpreadRate(reactionIntensity,heatFlux,heatSink)
mfWindSpeed, waf = calculateMidflameWindSpeed(fuelModel,windSpeed,canopyCover,canopyHeight,crownRatio)
phiW = calculateWindFactor(sigma,relativePackingRatio,mfWindSpeed)
phiS = calculateSlopeFactor(slope,packingRatio)
dirRmax, forwardSpreadRate = calculateDirectionOfMaxSpread(windDir,aspect,Rstar,phiS,phiW)
windSpeedLimit = calculateWindSpeedLimit(reactionIntensity,phiS)
effectiveWindSpeed = calculateEffectiveWindSpeed(forwardSpreadRate,Rstar,relativePackingRatio,sigma,windSpeedLimit=windSpeedLimit)
residenceTime = getResidenceTime(sigma)
#effectiveWindSpeed = 3.9
fireLengthToWidthRatio, eccentricity, backingSpreadRate, eA, eB, eC = calculateFireBasicDimensions(effectiveWindSpeed,forwardSpreadRate)
firelineIntensity = calculateFireFirelineIntensity(forwardSpreadRate,reactionIntensity,residenceTime)
flameLength = calculateFlameLength(firelineIntensity)
rosVectors = []
R = calculateSpreadRateAtVector(forwardSpreadRate,eccentricity,dirRmax,dirRmax)
R = convertFtMinToChHr(R)
if toPrint:
print("************************************************************")
print("Rate of Spread:\t\t\t\t\t%.1f\tch/h"%(R))
print("Reaction Intensity:\t\t\t\t%.0f\tBtu/ft2/min"%(reactionIntensity))
print("Surface Fire Dir of Max Spread (from north):\t%.0f\tdeg"%(dirRmax))
print("Midflame Wind Speed:\t\t\t\t%.1f\tmi/h"%(mfWindSpeed))
print("Wind Adjustment Factor:\t\t\t\t%.2f"%(waf))
print("Effective Wind Speed:\t\t\t\t%.1f\tmi/h"%(effectiveWindSpeed))
print("Live Fuel Moisture of Extinction:\t\t%.0f"%(moistOfExtLive*100))
print("Characteristic SA/V:\t\t\t\t%s\tft2/ft3"%(int(sigma)))
print("Bulk Density:\t\t\t\t\t%.4f\tlbs/ft3"%(bulkDensity))
print("Packing Ratio:\t\t\t\t\t%.4f"%(packingRatio))
print("Relative Packing Ratio:\t\t\t\t%.4f"%(relativePackingRatio))
print("Dead Fuel Reaction Intensity:\t\t\t%.0f\tBtu/ft2/min"%(reactionIntensityDead))
print("Live Fuel Reaction Intensity:\t\t\t%.0f\tBtu/ft2/min"%(reactionIntensityLive))
print("Surface Fire Wind Factor:\t\t\t%.1f"%(phiW))
print("Slope Factor:\t\t\t\t\t%.1f"%(phiS))
print("Heat Source:\t\t\t\t\t%.0f\tBtu/ft2/min"%(heatFlux*reactionIntensity*(1+phiS+phiW)))
print("Heat Sink:\t\t\t\t\t%.1f\tBtu/ft3"%(heatSink))
print("Dead Herbaceous Fuel Load:\t\t\t%.2f\tton/ac"%(loadDead[3]*21.78))
print("Live Fuel Load Remainder:\t\t\t%.2f\tton/ac"%(loadLive[0]*21.78))
print("Total Dead Fuel Load:\t\t\t\t%.2f\tton/ac"%(np.sum(loadDead)*21.78))
print("Total Live Fuel Load:\t\t\t\t%.2f\tton/ac"%(np.sum(loadLive)*21.78))
print("Dead Fuel Load Portion:\t\t\t\t%.2f"%(np.sum(loadDead)/(np.sum(loadDead)+np.sum(loadLive))*100))
print("Live Fuel Load Portion:\t\t\t\t%.2f"%(np.sum(loadLive)/(np.sum(loadDead)+np.sum(loadLive))*100))
print("************************************************************")
if maxOnly:
return dirRmax, R
for dirOfInterest in directions:
rosVector = calculateSpreadRateAtVector2(forwardSpreadRate,backingSpreadRate,eccentricity,dirRmax,dirOfInterest)
rosVector = convertFtMinToChHr(rosVector)
rosVectors.append(rosVector)
rosVectors = np.array(rosVectors)
#R = calculateROS(Rstar, phiW, phiS)
return directions, rosVectors
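# Illustrative sketch only: end-to-end use of the Rothermel wrapper above. The
# parameter values are arbitrary placeholder conditions (not validation data)
# and the helper name is ours. getROSfromParams returns the spread rate in
# chains/hour for headings 0-360 degrees.
def _example_ros_curve():
    params = {'model': 'FM1', 'canopyCover': 0.0, 'canopyHeight': 0.0,
              'crownRatio': 0.5, 'm1h': 8.0, 'm10h': 9.0, 'm100h': 10.0,
              'lhm': 60.0, 'lwm': 60.0, 'windSpeed': 10.0, 'windDir': 0.0,
              'slope': 0.5, 'aspect': 0.0}
    directions, rosVectors = getROSfromParams(params, toPrint=False)
    return directions, rosVectors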
def cartCoords(thetaRad,rosVectorsKmHr):
coords = np.zeros((len(rosVectorsKmHr),2))
x = -1*np.array(rosVectorsKmHr)*np.sin(thetaRad)
y = -1*np.array(rosVectorsKmHr)*np.cos(thetaRad)
coords[:,0] = x
coords[:,1] = y
return coords
def rothermelOuputToImg(theta,R,resX=50,resY=50):
coords = cartCoords(theta,R.copy())
(coords[:,0],coords[:,1]) = (coords[:,0]+resX/2,coords[:,1]+resY/2)
coords = np.array(coords,dtype=np.int32)
coordsTuple = []
for c in coords:
coordsTuple.append((c[0],c[1]))
img = Image.new('LA',(resX,resY))
draw = ImageDraw.Draw(img)
draw.polygon(coordsTuple,fill='black',outline=None)
img = np.copy(np.asarray(img)[:,:,1])
#img[int(resX/2),int(resY/2)] = 125
return img
def rothermelOuputToImgMulti(theta,Rbase,times,resX=50,resY=50):
img = Image.new('LA',(resX,resY))
draw = ImageDraw.Draw(img)
for t in times:
coords = cartCoords(theta,Rbase.copy()*t)
(coords[:,0],coords[:,1]) = (coords[:,0]+resX/2,coords[:,1]+resY/2)
coords = np.array(coords,dtype=np.int32)
coordsTuple = []
for c in coords:
coordsTuple.append((c[0],c[1]))
draw.polygon(coordsTuple,fill=(t,t),outline=(t,t))
img = np.copy(np.asarray(img)[:,:,1])
#img[int(resX/2),int(resY/2)] = 125
return img
def convertChHrToKmHour(R):
    # Accepts scalars, numpy arrays, or python lists/tuples. The original
    # `if R is list` test never matches an instance, and rebinding the loop
    # variable did not modify the list, so lists were returned unconverted.
    if isinstance(R, (list, tuple)):
        R = np.asarray(R, dtype=float)
    R = R*1.1          # chains/hr -> ft/min (1 chain = 66 ft)
    R = R*60.0/5280.0  # ft/min -> mi/hr
    R = R*1.60934      # mi/hr -> km/hr
    return R
def convertDegToRad(theta):
    # Same fix as convertChHrToKmHour: convert lists/tuples to a numpy array
    # so the conversion is applied element-wise.
    if isinstance(theta, (list, tuple)):
        theta = np.asarray(theta, dtype=float)
    theta = theta*3.1415926535/180.0
    return theta
def slopeToElevImg(phi,phiDir,resX=50,resY=50):
phiDirRad = phiDir*3.1415926535/180.0
slopeX = phi*np.sin(phiDirRad)
slopeY = -phi*np.cos(phiDirRad)
img = np.zeros((2,2))
#img[img == 0] = np.nan
img[0,0] = -resX/2*slopeX+resY/2*slopeY
img[0,-1]= resX/2*slopeX+resY/2*slopeY
img[-1,0] = -resX/2*slopeX-resY/2*slopeY
img[-1,-1] = resX/2*slopeX-resY/2*slopeY
img = zoom(img,resX/2,order=1)
return img
def visualizeInputImgs(directions,rosVectors,params,resX=50,resY=50,toPlot=True):
rosVectorsKmHr = convertChHrToKmHour(rosVectors)
directionsRad = convertDegToRad(directions)
x = -1*np.array(rosVectorsKmHr)*np.sin(directionsRad)
y = np.array(rosVectorsKmHr)*np.cos(directionsRad)
img6 = rothermelOuputToImg(directionsRad,rosVectorsKmHr*6.0,resX=resX,resY=resY)
img12 = rothermelOuputToImg(directionsRad,rosVectorsKmHr*12.0,resX=resX,resY=resY)
img18 = rothermelOuputToImg(directionsRad,rosVectorsKmHr*18.0,resX=resX,resY=resY)
img24 = rothermelOuputToImg(directionsRad,rosVectorsKmHr*24.0,resX=resX,resY=resY)
elevImg = slopeToElevImg(params['slope'],params['aspect'],resX=resX,resY=resY)
windDirRad = params['windDir']*3.1415926536/180.0
windX = np.zeros((resX,resY))+params['windSpeed']*np.sin(windDirRad)
windY = np.zeros((resX,resY))-params['windSpeed']*np.cos(windDirRad)
lhmImg = np.zeros((resX,resY))+params['lhm']
lwmImg = np.zeros((resX,resY))+params['lwm']
m1hImg = np.zeros((resX,resY))+params['m1h']
m10hImg = np.zeros((resX,resY))+params['m10h']
m100hImg = np.zeros((resX,resY))+params['m100h']
canopyCoverImg = np.zeros((resX,resY))+params['canopyCover']
canopyHeightImg = np.zeros((resX,resY))+params['canopyHeight']
crownRatioImg = np.zeros((resX,resY))+params['crownRatio']
modelImg = np.zeros((resX,resY))+params['modelInd']
fireImages = [img6,img12,img18,img24]
modelInputs = [elevImg,windX,windY,lhmImg,lwmImg,m1hImg,m10hImg,m100hImg,canopyCoverImg,canopyHeightImg,crownRatioImg,modelImg]
if toPlot:
plt.figure(figsize=(12,12))
plt.suptitle('Fuel Model:%s'%(params['model']))
plt.subplot(4,4,1)
plt.imshow(img12,cmap='jet')
plt.colorbar()
plt.title('Fire at 12 hours')
plt.subplot(4,4,2)
plt.imshow(img24,cmap='jet')
plt.colorbar()
plt.title('Fire at 24 hours')
plt.subplot(4,4,3)
plt.imshow(elevImg,cmap='jet')
plt.colorbar()
plt.title('Elevation')
plt.subplot(4,4,4)
plt.imshow(windX,cmap='jet',vmin=-20,vmax=20)
plt.colorbar()
plt.title('WindX')
plt.subplot(4,4,5)
plt.imshow(windY,cmap='jet',vmin=-20,vmax=20)
plt.colorbar()
plt.title('WindY')
plt.subplot(4,4,6)
plt.imshow(lhmImg,cmap='jet',vmin=30,vmax=150)
plt.colorbar()
plt.title('Live Herbaceous Moisture')
plt.subplot(4,4,7)
plt.imshow(lwmImg,cmap='jet',vmin=30,vmax=150)
plt.colorbar()
plt.title('Live Woody Moisture')
plt.subplot(4,4,8)
plt.imshow(m1hImg,cmap='jet',vmin=0,vmax=40)
plt.colorbar()
plt.title('1-hour Moisture')
plt.subplot(4,4,9)
plt.imshow(canopyCoverImg,cmap='jet',vmin=0,vmax=1)
plt.colorbar()
plt.title('Canopy Cover')
plt.subplot(4,4,10)
plt.imshow(canopyHeightImg,cmap='jet',vmin=1,vmax=20)
plt.colorbar()
plt.title('Canopy Height')
plt.subplot(4,4,11)
plt.imshow(crownRatioImg,cmap='jet',vmin=0,vmax=1)
plt.colorbar()
plt.title('Crown Ratio')
plt.subplot(4,4,12)
plt.imshow(modelImg,cmap='jet',vmin=0,vmax=52)
plt.colorbar()
plt.title('Model')
#plt.plot(x,y)
#plt.plot(0,0,'ok')
#xRange = x.max()-x.min()
#yRange = y.max()-y.min()
#plt.xlim([x.min()-xRange/2,x.max()+xRange/2])
#plt.ylim([y.min()-yRange/2,y.max()+yRange/2])
#plt.title('Rate of Spread')
return fireImages, modelInputs
def visualizeInputValues(directions,rosVectors,params,resX=50,resY=50):
rosVectorsKmHr = convertChHrToKmHour(rosVectors)
directionsRad = convertDegToRad(directions)
x = -1*np.array(rosVectorsKmHr)*np.sin(directionsRad)
y = np.array(rosVectorsKmHr)*np.cos(directionsRad)
imgFire = rothermelOuputToImgMulti(directionsRad,rosVectorsKmHr,[48,42,36,30,24,18,12,6],resX=resX,resY=resY)
imgFire[25,25] = 0
elevImg = slopeToElevImg(params['slope'],params['aspect'],resX=resX,resY=resY)
windDirRad = params['windDir']*3.1415926536/180.0
windSpeed = params['windSpeed']
windX = 1.0*windSpeed*np.sin(windDirRad)
windY = -1.0*windSpeed*np.cos(windDirRad)
windYs = [windX,windY]
windXs = np.arange(len(windYs))
windNames = ('E+','N+')
windLimits = [-20,20]
moistYs = [params['m1h'],params['m10h'],params['m100h'],params['lhm']/5,params['lwm']/5]
moistXs = np.arange(len(moistYs))
moistNames = ('m1h','m10h','m100h','lhm/5','lwm/5')
moistLimits = [0,60]
canopyYs = [params['canopyCover'],params['canopyHeight']/20,params['crownRatio']]
canopyXs = np.arange(len(canopyYs))
canopyNames = ('Cover (%)','Height (ft/20)','Ratio (%)')
canopyLimits = [0,1]
modelYs = [params['modelInd'],0]
modelXs = np.arange(len(modelYs))
modelNames = (str(params['model']),'')
modelLimits = [0,52]
plt.figure(figsize=(10,14))
plt.suptitle('Fuel Model:%s'%(params['model']))
plt.subplot(3,2,1)
plt.imshow(imgFire,cmap='gray_r')
c = plt.colorbar(ticks=[48,36,24,12,0])
plt.title('Fire spread')
plt.xlabel('km')
plt.ylabel('km')
c.ax.set_label('Hours')
#plt.subplot(3,3,2)
#plt.imshow(img24,cmap='jet')
#plt.colorbar()
#plt.title('Fire at 24 hours')
plt.subplot(3,2,3)
plt.imshow(elevImg,cmap='jet')
plt.colorbar()
plt.title('Elevation Difference [km]')
plt.subplot(3,2,4)
plt.bar(windXs,windYs,align='center');
plt.xticks(windXs,windNames);
plt.ylabel('WindSpeed (mph)');
plt.ylim(windLimits)
plt.subplot(3,2,5)
plt.bar(moistXs,moistYs,align='center');
plt.xticks(moistXs,moistNames);
plt.ylabel('Moisture (%)');
plt.ylim(moistLimits)
plt.subplot(3,2,6)
plt.bar(canopyXs,canopyYs,align='center');
plt.xticks(canopyXs,canopyNames);
plt.ylabel('Canopy (%)');
plt.ylim(canopyLimits)
plt.subplot(3,2,2)
plt.bar(modelXs,modelYs,align='center');
plt.xticks(modelXs,modelNames);
plt.ylabel('Model Rank');
plt.ylim(modelLimits)
plt.xlim([-0.95,0.95])
#plt.plot(x,y)
#plt.plot(0,0,'ok')
#xRange = x.max()-x.min()
#yRange = y.max()-y.min()
#plt.xlim([x.min()-xRange/2,x.max()+xRange/2])
#plt.ylim([y.min()-yRange/2,y.max()+yRange/2])
#plt.title('Rate of Spread')
return imgFire
def paramListTodict(paramsRaw):
params = dict()
params['model'] = paramsRaw[0]
params['canopyCover'] = float(paramsRaw[1])/100
params['canopyHeight'] = float(paramsRaw[2])
params['crownRatio'] = float(paramsRaw[3])
params['m1h'] = float(paramsRaw[4])
params['m10h'] = float(paramsRaw[5])
params['m100h'] = float(paramsRaw[6])
params['lhm'] = float(paramsRaw[7])
params['lwm'] = float(paramsRaw[8])
params['windSpeed'] = float(paramsRaw[9])
params['windDir'] = float(paramsRaw[10])
params['slope'] = float(paramsRaw[11])/100
params['aspect'] = float(paramsRaw[12])
return params
def getStandardParams():
    # model,canopyCover/100,Height,Ratio,m1h,m10h,m100h,lhm,lwm,windSpeed,windDir,slope,aspect
#paramList = ['FM1',0,0,0.5,8,6,4,60,60,10,0,0.5,0]
paramList = ['FM1',0,0,0.5,8,9,10,60,60,10,0,0.5,0]
params = paramListTodict(paramList)
return params
def determineFastestModel(params=None,toPrint=False):
if params is None:
params = getStandardParams()
fuelModels = buildFuelModels(allowDynamicModels=True,allowNonBurningModels=True)
updatedModels = []
Rs = []
for fuelModel in list(fuelModels.keys()):
params['model'] = fuelModel
direction, R = getROSfromParams(params,maxOnly=True)
updatedModels.append(fuelModel)
Rs.append(R)
numZero = len(np.where(np.array(Rs) <= 0.01)[0])
inds = np.argsort(Rs)
updatedModelsSort = np.array(updatedModels)[inds]
RsSort = np.sort(Rs)
modelIndexDict = dict()
for i in range(0,len(inds)):
value = max(0,i-numZero+1)
modelIndexDict[updatedModelsSort[i]] = value
if toPrint:
print("Model = %s,\tR = %.2f"%(updatedModelsSort[i],RsSort[i]))
return modelIndexDict
def rearrangeDatas(datas):
sz = datas[0].shape
szrs = sz[0]*sz[1]
datasNew = np.zeros((szrs*len(datas),))
for i in range(0,len(datas)):
datasNew[i*szrs:(i+1)*szrs] = np.reshape(datas[i],(szrs,))
return datasNew
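# Illustrative sketch only: rearrangeDatas flattens a list of equally sized 2D
# arrays into one long 1D vector, one array after another. The helper name and
# the toy 2x2 arrays below are ours.
def _example_rearrange_datas():
    a = np.zeros((2, 2))
    b = np.ones((2, 2))
    flat = rearrangeDatas([a, b])   # shape (8,): [0,0,0,0,1,1,1,1]
    return flat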
def getStandardParamsInput():
paramsInput = dict()
paramsInput['model'] = [None,True,False] # string
paramsInput['canopyCover'] = [None,0.0,1.0] # percent (0-1)
paramsInput['canopyHeight'] = [None,1.0,20.0] # ft (1-20)
paramsInput['crownRatio'] = [None,0.1,1.0] # fraction (0.1-1)
paramsInput['m1h'] = [None,1.0,40.0] # percent (1-60)
paramsInput['m10h'] = [None,1.0,40.0] # percent (1-60)
paramsInput['m100h'] = [None,1.0,40.0] # percent (1-60)
paramsInput['lhm'] = [None,30.0,100.0] # percent (30-300)
paramsInput['lwm'] = [None,30.0,100.0] # percent (30-300)
paramsInput['windSpeed'] = [None,0.0,30.0] # mph (0-30)
paramsInput['windDir'] = [None,0.0,360.0] # degrees (0-360)
paramsInput['slope'] = [None,0.0,1.0] # fraction (0-1)
paramsInput['aspect'] = [None,0.0,360.0] # degrees (0-360)
paramsInput['Mth'] = [None,5,9] # integer
paramsInput['Day'] = [None,0,31] # integer
paramsInput['Pcp'] = [None,0.3,10.9] # mm
paramsInput['mTH'] = [None,400,600] # 24-Hour
paramsInput['xTH'] = [None,1200,1500] # 24-Hour
paramsInput['mT'] = [None,2.0,16.6] # degrees C
paramsInput['xT'] = [None,28.9,37.2] # degrees C
paramsInput['mH'] = [None,39.2,50.0] # Percent
paramsInput['xH'] = [None,39.2,50.0] # Percent
paramsInput['PST'] = [None,0,2400] # Precipitation Start Time
paramsInput['PET'] = [None,0,2400] # Precipitation End Time
paramsInput['startTime'] = [None,0,24] # Fire start hour
return paramsInput
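# Illustrative sketch only: draw one random parameter set within the bounds
# declared by getStandardParamsInput. Each entry is [fixedValue, min, max];
# with fixedValue None the value is sampled, otherwise it is passed through.
# The helper name is ours and nothing here runs at import time.
def _example_random_conditions():
    bounds = getStandardParamsInput()
    params = getRandomConditions(bounds, allowDynamicModels=True)
    return params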
def manyFiresInputFigure(modelInputs):
    # OffsetImage/AnnotationBbox are required for the thumbnail annotations below.
    from matplotlib.offsetbox import OffsetImage, AnnotationBbox
    fig, ax = plt.subplots(figsize=(8,8))
a = []
lims = [[-30,30],[-20,20],[-20,20],[30,150],[30,150],[0,30],[0,30],[0,30],[0,1],[0,20],[0,1],[0,53]]
names = ['Elevation','East Wind','North Wind','Live Herbaceous Moisture','Live Woody Moisture','1-Hour Moisture','10-Hour Moisture','100-Hour Moisture','Canopy Cover','Canopy Height','Crown Ratio','Fuel Model']
textOffset = [0]
#modelInputs = [elevImg,windX,windY,lhmImg,lwmImg,m1hImg,m10hImg,m100hImg,canopyCoverImg,canopyHeightImg,crownRatioImg,modelImg]
for i in range(len(modelInputs)-1,-1,-1):
img = modelInputs[i].copy()
img[-1,-1] = lims[i][0]
img[-1,-2] = lims[i][1]
oi = OffsetImage(img, zoom = 2.0, cmap='jet')
box = AnnotationBbox(oi, (-0.5*i,1*i), frameon=True)
a.append(ax.add_artist(box))
ax.annotate(names[i],xy=(-0.5*i-1.1,1*i-0.9),xycoords='data',textcoords='data',xytext=(-0.5*i-4-(len(names[i])-10)*0.1,1*i-0.85),arrowprops=dict(facecolor='black',shrink=0.05))
i = -1
    # NOTE: imgFire is not a parameter of this function; it must already exist
    # at module scope (e.g. returned by visualizeInputValues in __main__).
    oi = OffsetImage(imgFire, zoom = 2.0, cmap='jet')
box = AnnotationBbox(oi, (-0.5*i,1*i), frameon=True)
    ax.annotate('Fire Perimeter',xy=(-0.5*i-1.1,1*i-0.9),xycoords='data',textcoords='data',xytext=(-0.5*i-4,1*i-0.85),arrowprops=dict(facecolor='black',shrink=0.05))
a.append(ax.add_artist(box))
plt.xlim(-2,6.15)
plt.ylim(-1.9,12.2)
plt.xlim(-9.0,1.4)
#plt.ylim(-50,50)
plt.axis('off')
plt.tight_layout()
plt.savefig('..%soutputs%sinputsExampleManyFires.png'%(os.sep, os.sep),dpi=300)
def makeFirePerimetersFigure(imgFire):
import skimage.transform as sktf
import skimage.filters as skfi
from mpl_toolkits.axes_grid1 import make_axes_locatable
    oi = skfi.gaussian(imgFire,sigma=1.0,preserve_range=True)  # smoothed copy; currently unused
    # imgFire is regenerated at higher resolution from the module-level
    # directions/rosVectors/params produced in the __main__ block.
    imgFire = visualizeInputValues(directions,rosVectors,params,resX=250,resY=250)
imgFire[125:126,125:126] = 0
imgFire = imgFire[25:175,100:]
imgFire = imgFire[::-1,:]
#oi = OffsetImage(imgFire, zoom = 2.0, cmap='jet')
plt.figure(figsize=(12,12))
ax = plt.gca()
fs=32
im = ax.imshow(imgFire,cmap='hot_r')
plt.gca().invert_yaxis()
plt.xlabel('km',fontsize=fs)
plt.ylabel('km',fontsize=fs)
plt.tick_params(labelsize=fs)
plt.xticks([0,20,40,60,80,100,120,140])
plt.yticks([0,20,40,60,80,100,120,140])
divider = make_axes_locatable(ax)
cax = divider.append_axes("right",size="5%", pad=0.05)
c = plt.colorbar(im,ticks=[48,36,24,12,0],cax=cax)
#plt.title('Fire spread')
plt.tick_params(labelsize=fs)
plt.ylabel('Hours',fontsize=fs)
#c.ax.set_label(fontsize=fs)
plt.tight_layout()
plt.savefig('..%soutputs%sexampleFirePerimiter.eps'%(os.sep, os.sep))
if __name__ == "__main__":
''' case0: Generate 1 set of random inputs and visualize the results.
case1: Generate set of 100 random inputs and save inputs for validation
with behavePlus.
case2: Re-generate prediction with same random inputs.
case3: Re-generate single validation output.
case4: Generate validation plots.
case5: Generate neural network dataset
'''
case = 5
paramsInput = dict()
paramsInput['model'] = [None,True,False] # string
paramsInput['canopyCover'] = [None,0.0,1.0] # percent (0-1)
paramsInput['canopyHeight'] = [None,1.0,20.0] # ft (1-20)
paramsInput['crownRatio'] = [None,0.1,1.0] # fraction (0.1-1)
paramsInput['m1h'] = [None,1.0,40.0] # percent (1-60)
paramsInput['m10h'] = [None,1.0,40.0] # percent (1-60)
paramsInput['m100h'] = [None,1.0,40.0] # percent (1-60)
paramsInput['lhm'] = [None,30.0,100.0] # percent (30-300)
paramsInput['lwm'] = [None,30.0,100.0] # percent (30-300)
paramsInput['windSpeed'] = [None,0.0,30.0] # mph (0-30)
paramsInput['windDir'] = [None,0.0,360.0] # degrees (0-360)
paramsInput['slope'] = [None,0.0,1.0] # fraction (0-1)
paramsInput['aspect'] = [None,0.0,360.0] # degrees (0-360)
paramsInput['Tmin'] = [None,2.0,16.6] # degrees C
paramsInput['Tmax'] = [None,28.9,37.2] # degrees C
resX = 50
resY = 50
"""
"""
#params['m1h'] = 40.0
#params['windSpeed'] = 0.0 # mph (0-30)
#params['windDir'] = -135.0 # degrees (0-360)
#params['slope'] = 0.0 # fraction (0-1)
#params['aspect'] = 135
if case == 0:
params = getRandomConditions(paramsInput,allowDynamicModels=True)
params['model'] = 'TU2'
params['canopyCover'] = 0.0607 # percent (0-1)
params['canopyHeight'] = 17.46 # ft (1-20)
params['crownRatio'] = 0.99 # fraction (0-1)
params['m1h'] = 8.4 # percent (1-100)
params['m10h'] = 6 # percent (1-100)
params['m100h'] = 4 # percent (1-100)
params['lhm'] = 82.75 # percent (30-300)
params['lwm'] = 75.98 # percent (30-300)
params['windSpeed'] = 12.08 # mph (0-30)
params['windDir'] = 223.57 # degrees (0-360)
params['slope'] = 0.9942 # fraction (0-1)
params['aspect'] = 248.29 # degrees (0-360)
directions, rosVectors = getROSfromParams(params,toPrint=True)
visualizeInputImgs(directions,rosVectors,params,resX=resX,resY=resY)
visualizeInputValues(directions,rosVectors,params,resX=resX,resY=resY)
elif case == 1:
allParams = []
allDirections = []
allRosVectors = []
for i in range(0,1000):
params = getRandomConditions(paramsInput,allowDynamicModels=True)
directions, rosVectors = getROSfromParams(params)
allParams.append(orderParams(params))
allDirections.append(directions)
allRosVectors.append(rosVectors)
allParams = np.array(allParams).T
#pd.DataFrame(allParams[1:,:],columns=allParams[0,:]).astype(float).round(2).to_csv('../rothermelData/validationInputs.csv')
#pd.DataFrame(allDirections).T.to_csv('../rothermelData/validationDirections.csv')
#pd.DataFrame(allRosVectors).T.to_csv('../rothermelData/validationRosVectors.csv')
elif case == 2:
allParams = pd.read_csv('../rothermelData/validationInputs.csv')
allDirections = []
allRosVectors = []
for i in range(1,allParams.values.shape[1]):
paramsRaw = allParams.values[:,i]
params = paramListTodict(paramsRaw)
directions, rosVectors = getROSfromParams(params)
allParams.append(orderParams(params))
allDirections.append(directions)
allRosVectors.append(rosVectors)
allParams = np.array(allParams).T
pd.DataFrame(allDirections).T.to_csv('../rothermelData/validationDirections.csv')
| pd.DataFrame(allRosVectors) | pandas.DataFrame |
import unittest
import numpy as np
from pandas import Index
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as common
import pandas._tseries as lib
class TestTseriesUtil(unittest.TestCase):
def test_combineFunc(self):
pass
def test_reindex(self):
pass
def test_isnull(self):
pass
def test_groupby(self):
pass
def test_groupby_withnull(self):
pass
def test_merge_indexer(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.merge_indexer_object(new, old.indexMap)
expect_filler = [-1, 0, -1, -1, -1, 1, -1, -1, -1, -1, 2, -1]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([1, 4])
new = Index(range(5, 10))
filler = lib.merge_indexer_object(new, old.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_backfill(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.backfill_object(old, new, old.indexMap, new.indexMap)
expect_filler = [0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, -1]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([1, 4])
new = Index(range(5, 10))
filler = lib.backfill_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_pad(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = | lib.pad_object(old, new, old.indexMap, new.indexMap) | pandas._tseries.pad_object |
import os
# Enforce CPU Usage
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # Uncommenting enforces CPU usage # Commenting enforces GPU usage
# Seed the Random-Number-Generator in a bid to get 'Reproducible Results'
import tensorflow as tf
from random import seed as python_seed, sample
from numpy.random import seed
python_seed(1)  # seed Python's built-in RNG (previously shadowed by the numpy import)
seed(1)         # seed NumPy's RNG
tf.compat.v1.set_random_seed(3)
# load required modules
import pandas as pd
import numpy as np
import math, time
from datetime import datetime, timedelta
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import QuantileTransformer, MinMaxScaler, Normalizer
from sklearn.metrics import mean_squared_error, explained_variance_score, mean_absolute_error, r2_score
import matplotlib.pyplot as plt
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.ensemble import ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression, SGDRegressor, BayesianRidge, ARDRegression
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
# Import classes from my custom package
from custom_classes.Starter_Module_01 import Starter
# Global settings for PANDAS frame display
pd.set_option('html.table_schema', True)
pd.set_option('max_columns', 800)
pd.set_option('max_rows', 70000)
def args_parse_cmd():
parser = ArgumentParser(description='START-HELP: Program for forecasting/predicting breakup or schism in social networks', epilog='END-HELP: End of assistance/help section',
formatter_class=ArgumentDefaultsHelpFormatter, conflict_handler='resolve')
parser.add_argument('-rp', '--root_path', nargs='+', default='datasets/', type=str, help='Generic root path for application/program')
parser.add_argument('-rm', '--run_mode', nargs='+', default='single', type=str, choices=['single', 'all'], help='Run model per specified dataset OR cumulatively for all intrinsic datasets')
args = parser.parse_args()
return args
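# Hedged usage sketch: the parser above is typically driven from the command line, e.g.
#   python <this_script>.py --root_path datasets/ --run_mode single
# (the script name is a placeholder; --root_path/-rp and --run_mode/-rm are the only options defined above).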
def process_reprd_idx(myCls, root_path, fname):
df1 = myCls.load_data(root_path, fname, sep=',', header=0, index_col=0, mode='READ')
exp_reprd_idx = pd.DataFrame()
for i in range(len(df1)):
tmp_start_date = df1.iloc[i,1]
start_date = datetime.strptime(tmp_start_date, '%d-%m-%Y')
tmp_end_date = df1.iloc[i,2]
end_date = datetime.strptime(tmp_end_date, '%d-%m-%Y')
while (start_date <= end_date):
exp_reprd_idx = exp_reprd_idx.append([[str(start_date), df1.values[i,3]]], ignore_index=True)
start_date = start_date + timedelta(days=1)
exp_reprd_idx.to_csv(root_path+fname[:-4]+'_EXPAND.csv', sep=',', header=False, index=False)
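# Illustrative sketch (assumed input layout, not taken from the real data): for a row with
# start date '01-03-2020', end date '03-03-2020' and an index value X, the loop above writes
# one line per day to <fname>_EXPAND.csv:
#   2020-03-01 00:00:00,X
#   2020-03-02 00:00:00,X
#   2020-03-03 00:00:00,X
# i.e. an interval record is expanded into a daily time series.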
def cumm_2_reg_conv(myCls, root_path, fname):
#df1 = myCls.load_data(root_path, fname, sep='\s', header=0, index_col=None, mode='READ')
df1 = myCls.load_data(root_path, fname, sep=',', header=0, index_col=None, mode='READ')
infect_recover_death = | pd.DataFrame() | pandas.DataFrame |
import json
import os
import glob
import random
from typing import Union
try:
import xarray as xr
except ModuleNotFoundError:
xr = None
import numpy as np
import pandas as pd
from .datasets import Datasets
from .utils import check_attributes, download, sanity_check
from ai4water.utils.utils import dateandtime_now
try:  # shapely may not be installed, as it may be difficult to install and is only needed for plotting data.
from ai4water.pre_processing.spatial_utils import plot_shapefile
except ModuleNotFoundError:
plot_shapefile = None
# directory separator
SEP = os.sep
def gb_message():
link = "https://doi.org/10.5285/8344e4f3-d2ea-44f5-8afa-86d2987543a9"
    raise ValueError(f"Download the data from {link} and provide the directory "
f"path as dataset=Camels(data=data)")
class Camels(Datasets):
"""
Get CAMELS dataset.
This class first downloads the CAMELS dataset if it is not already downloaded.
Then the selected attribute for a selected id are fetched and provided to the
user using the method `fetch`.
Attributes
-----------
- ds_dir str/path: diretory of the dataset
- dynamic_features list: tells which dynamic attributes are available in
this dataset
- static_features list: a list of static attributes.
- static_attribute_categories list: tells which kinds of static attributes
are present in this category.
Methods
---------
- stations : returns name/id of stations for which the data (dynamic attributes)
exists as list of strings.
- fetch : fetches all attributes (both static and dynamic type) of all
        station/gauge_ids or a specified station. It can also be used to
        fetch all attributes of a number of station ids either by providing
        their gauge_id or by just saying that we need data of 20 stations
which will then be chosen randomly.
- fetch_dynamic_features :
        fetches specified dynamic attributes of one specified station. If the
dynamic attribute is not specified, all dynamic attributes will be
fetched for the specified station. If station is not specified, the
specified dynamic attributes will be fetched for all stations.
- fetch_static_features :
works same as `fetch_dynamic_features` but for `static` attributes.
Here if the `category` is not specified then static attributes of
the specified station for all categories are returned.
stations : returns list of stations
"""
DATASETS = {
'CAMELS-BR': {'url': "https://zenodo.org/record/3964745#.YA6rUxZS-Uk",
},
'CAMELS-GB': {'url': gb_message},
}
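    # Hedged usage sketch (Camels itself is abstract; assumes a concrete subclass such as
    # CAMELS_BR defined below):
    #   ds = CAMELS_BR()
    #   stations = ds.stations()                         # list of gauge/station ids
    #   data = ds.fetch(stations=10, as_dataframe=True)  # 10 randomly chosen stations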
def stations(self):
raise NotImplementedError
def _read_dynamic_from_csv(self, stations, dynamic_features, st=None, en=None):
raise NotImplementedError
def fetch_static_features(self, station, features):
raise NotImplementedError
@property
def start(self): # start of data
raise NotImplementedError
@property
def end(self): # end of data
raise NotImplementedError
@property
def dynamic_features(self)->list:
raise NotImplementedError
def _check_length(self, st, en):
if st is None:
st = self.start
if en is None:
en = self.end
return st, en
def to_ts(self, static, st, en, as_ts=False, freq='D'):
st, en = self._check_length(st, en)
if as_ts:
idx = pd.date_range(st, en, freq=freq)
static = pd.DataFrame(np.repeat(static.values, len(idx), axis=0), index=idx,
columns=static.columns)
return static
else:
return static
@property
def camels_dir(self):
"""Directory where all camels datasets will be saved. This will under
datasets directory"""
return os.path.join(self.base_ds_dir, "CAMELS")
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return self._ds_dir
@ds_dir.setter
def ds_dir(self, x):
if x is None:
x = os.path.join(self.camels_dir, self.__class__.__name__)
if not os.path.exists(x):
os.makedirs(x)
# sanity_check(self.name, x)
self._ds_dir = x
def fetch(self,
stations: Union[str, list, int, float, None] = None,
dynamic_features: Union[list, str, None] = 'all',
static_features: Union[str, list, None] = None,
st: Union[None, str] = None,
en: Union[None, str] = None,
as_dataframe:bool = False,
**kwargs
) -> Union[dict, pd.DataFrame]:
"""
Fetches the attributes of one or more stations.
Arguments:
stations : if string, it is supposed to be a station name/gauge_id.
If list, it will be a list of station/gauge_ids. If int, it will
be supposed that the user want data for this number of
stations/gauge_ids. If None (default), then attributes of all
available stations. If float, it will be supposed that the user
wants data of this fraction of stations.
dynamic_features : If not None, then it is the attributes to be
fetched. If None, then all available attributes are fetched
static_features : list of static attributes to be fetches. None
means no static attribute will be fetched.
st : starting date of data to be returned. If None, the data will be
returned from where it is available.
en : end date of data to be returned. If None, then the data will be
returned till the date data is available.
as_dataframe : whether to return dynamic attributes as pandas
dataframe or as xarray dataset.
kwargs : keyword arguments to read the files
returns:
If both static and dynamic features are obtained then it returns a
dictionary whose keys are station/gauge_ids and values are the
attributes and dataframes.
Otherwise either dynamic or static features are returned.
"""
if isinstance(stations, int):
# the user has asked to randomly provide data for some specified number of stations
stations = random.sample(self.stations(), stations)
elif isinstance(stations, list):
pass
elif isinstance(stations, str):
stations = [stations]
elif isinstance(stations, float):
num_stations = int(len(self.stations()) * stations)
stations = random.sample(self.stations(), num_stations)
elif stations is None:
# fetch for all stations
stations = self.stations()
else:
raise TypeError(f"Unknown value provided for stations {stations}")
if xr is None:
            raise ModuleNotFoundError("module xarray must be installed to use the `datasets` module")
return self.fetch_stations_attributes(stations,
dynamic_features,
static_features,
st=st,
en=en,
as_dataframe=as_dataframe,
**kwargs)
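    # Example call (sketch): fetch all dynamic features for 10% of the stations over a fixed
    # window, returned as an xarray Dataset ('ds' is any concrete subclass instance):
    #   ds.fetch(stations=0.1, dynamic_features='all', st='20000101', en='20051231')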
def _maybe_to_netcdf(self, fname:str):
self.dyn_fname = os.path.join(self.ds_dir, f'{fname}.nc')
if not os.path.exists(self.dyn_fname):
# saving all the data in netCDF file using xarray
print(f'converting data to netcdf format for faster io operations')
data = self.fetch(static_features=None)
data_vars = {}
coords = {}
for k, v in data.items():
data_vars[k] = (['time', 'dynamic_features'], v)
index = v.index
index.name = 'time'
coords = {
'dynamic_features': list(v.columns),
'time': index
}
xds = xr.Dataset(
data_vars=data_vars,
coords=coords,
attrs={'date': f"create on {dateandtime_now()}"}
)
xds.to_netcdf(self.dyn_fname)
def fetch_stations_attributes(self,
stations: list,
dynamic_features='all',
static_features=None,
st=None,
en=None,
as_dataframe:bool = False,
**kwargs):
"""Reads attributes of more than one stations.
Arguments:
stations : list of stations for which data is to be fetched.
dynamic_features : list of dynamic attributes to be fetched.
if 'all', then all dynamic attributes will be fetched.
static_features : list of static attributes to be fetched.
If `all`, then all static attributes will be fetched. If None,
then no static attribute will be fetched.
st : start of data to be fetched.
en : end of data to be fetched.
as_dataframe : whether to return the data as pandas dataframe. default
is xr.dataset object
kwargs dict: additional keyword arguments
Returns:
Dynamic and static features of multiple stations. Dynamic features
            are by default returned as xr.Dataset unless `as_dataframe` is True, in
            which case it is a pandas dataframe with a multiindex. If xr.Dataset,
it consists of `data_vars` equal to number of stations and for each
station, the `DataArray` is of dimensions (time, dynamic_features).
where `time` is defined by `st` and `en` i.e length of `DataArray`.
In case, when the returned object is pandas DataFrame, the first index
            is `time` and the second index is `dynamic_features`. Static attributes
            are always returned as pandas DataFrame and have the following shape
            `(stations, static_features)`. If `dynamic_features` is None,
then they are not returned and the returned value only consists of
static features. Same holds true for `static_features`.
If both are not None, then the returned type is a dictionary with
`static` and `dynamic` keys.
Raises:
ValueError, if both dynamic_features and static_features are None
"""
st, en = self._check_length(st, en)
if dynamic_features is not None:
dynamic_features = check_attributes(dynamic_features, self.dynamic_features)
if not os.path.exists(self.dyn_fname):
# read from csv files
# following code will run only once when fetch is called inside init method
dyn = self._read_dynamic_from_csv(stations, dynamic_features, st=st, en=en)
else:
                dyn = xr.load_dataset(self.dyn_fname)  # dataset
dyn = dyn[stations].sel(dynamic_features=dynamic_features, time=slice(st, en))
if as_dataframe:
dyn = dyn.to_dataframe(['time', 'dynamic_features'])
if static_features is not None:
static = self.fetch_static_features(stations, static_features)
stns = {'dynamic': dyn, 'static': static}
else:
stns = dyn
elif static_features is not None:
return self.fetch_static_features(stations, static_features)
else:
raise ValueError
return stns
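    # Return-shape summary (sketch of the docstring above):
    #   dynamic only          -> xr.Dataset (or a multiindex DataFrame if as_dataframe=True)
    #   static only           -> pd.DataFrame of shape (stations, static_features)
    #   dynamic and static    -> {'dynamic': ..., 'static': ...}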
def fetch_dynamic_features(self,
stn_id,
attributes='all',
st=None,
en=None,
as_dataframe=False):
"""Fetches all or selected dynamic attributes of one station."""
assert isinstance(stn_id, str)
station = [stn_id]
return self.fetch_stations_attributes(station,
attributes,
None,
st=st,
en=en,
as_dataframe=as_dataframe)
def fetch_station_attributes(self,
station: str,
dynamic_features: Union[str, list, None] = 'all',
static_features: Union[str, list, None] = None,
as_ts: bool = False,
st: Union[str, None] = None,
en: Union[str, None] = None,
**kwargs) -> pd.DataFrame:
"""
Fetches attributes for one station.
Arguments:
station : station id/gauge id for which the data is to be fetched.
dynamic_features
static_features
as_ts : whether static attributes are to be converted into a time
series or not. If yes then the returned time series will be of
                same length as that of dynamic attributes.
            st : starting point from which the data is to be fetched. By default
                the data will be fetched from where it is available.
            en : end point of data to be fetched. By default the data will be fetched
                till the end of the available record.
Return:
dataframe if as_ts is True else it returns a dictionary of static and
dynamic attributes for a station/gauge_id
"""
st, en = self._check_length(st, en)
station_df = pd.DataFrame()
if dynamic_features:
dynamic = self.fetch_dynamic_features(station, dynamic_features, st=st,
en=en, **kwargs)
station_df = pd.concat([station_df, dynamic])
if static_features is not None:
static = self.fetch_static_features(station, static_features)
if as_ts:
station_df = pd.concat([station_df, static], axis=1)
else:
station_df ={'dynamic': station_df, 'static': static}
elif static_features is not None:
station_df = self.fetch_static_features(station, static_features)
return station_df
class LamaH(Camels):
"""
Large-Sample Data for Hydrology and Environmental Sciences for Central Europe
from url = "https://zenodo.org/record/4609826#.YFNp59zt02w"
paper: https://essd.copernicus.org/preprints/essd-2021-72/
"""
url = "https://zenodo.org/record/4609826#.YFNp59zt02w"
    _data_types = ['total_upstrm', 'diff_upstrm_all', 'diff_upstrm_lowimp']
    time_steps = ['daily', 'hourly']
static_attribute_categories = ['']
def __init__(self, *,
time_step: str,
data_type: str,
**kwargs
):
"""
Arguments:
time_step : possible values are `daily` or `hourly`
data_type : possible values are `total_upstrm`, `diff_upstrm_all`
or 'diff_upstrm_lowimp'
"""
assert time_step in self.time_steps, f"invalid time_step {time_step} given"
assert data_type in self._data_types, f"invalid data_type {data_type} given."
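        # Example instantiation (sketch): LamaH(time_step='daily', data_type='total_upstrm')
        # selects the daily series aggregated over the total upstream catchment area.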
self.time_step = time_step
self.data_type = data_type
super().__init__(**kwargs)
self._download()
fpath = os.path.join(self.ds_dir, 'lamah_diff_upstrm_lowimp_hourly_dyn.nc')
_data_types = self._data_types if self.time_step == 'daily' else ['total_upstrm']
if not os.path.exists(fpath):
for dt in _data_types:
for ts in self.time_steps:
self.time_step = ts
self.data_type = dt
fname = f"lamah_{dt}_{ts}_dyn"
self._maybe_to_netcdf(fname)
self.time_step = time_step
self.data_type = data_type
self.dyn_fname = os.path.join(self.ds_dir, f'lamah_{data_type}_{time_step}_dyn.nc')
@property
def dynamic_features(self):
station = self.stations()[0]
df = self.read_ts_of_station(station)
return df.columns.to_list()
@property
def static_features(self) -> list:
fname = os.path.join(self.data_type_dir, f'1_attributes{SEP}Catchment_attributes.csv')
df = pd.read_csv(fname, sep=';', index_col='ID')
return df.columns.to_list()
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return os.path.join(self.camels_dir, self.name)
@property
def data_type_dir(self):
directory = 'CAMELS_AT'
if self.time_step == 'hourly':
directory = 'CAMELS_AT1' # todo, use it only for hourly, daily is causing errors
# self.ds_dir/CAMELS_AT/data_type_dir
f = [f for f in os.listdir(os.path.join(self.ds_dir, directory)) if self.data_type in f][0]
return os.path.join(self.ds_dir, f'{directory}{SEP}{f}')
def stations(self)->list:
# assuming file_names of the format ID_{stn_id}.csv
_dirs = os.listdir(os.path.join(self.data_type_dir, f'2_timeseries{SEP}{self.time_step}'))
s = [f.split('_')[1].split('.csv')[0] for f in _dirs]
return s
def _read_dynamic_from_csv(self,
stations,
dynamic_features:Union[str, list]='all',
st=None,
en=None,
):
"""Reads attributes of one station"""
stations_attributes = {}
for station in stations:
station_df = pd.DataFrame()
if dynamic_features is not None:
dynamic_df = self.read_ts_of_station(station)
station_df = pd.concat([station_df, dynamic_df])
stations_attributes[station] = station_df
return stations_attributes
def fetch_static_features(self,
station:Union[str, list],
features=None
)->pd.DataFrame:
fname = os.path.join(self.data_type_dir, f'1_attributes{SEP}Catchment_attributes.csv')
df = pd.read_csv(fname, sep=';', index_col='ID')
#if features is not None:
static_features = check_attributes(features, self.static_features)
df = df[static_features]
if isinstance(station, list):
stations = [str(i) for i in station]
elif isinstance(station, int):
stations = str(station)
else:
stations = station
df.index = df.index.astype(str)
df = df.loc[stations]
if isinstance(df, pd.Series):
df = pd.DataFrame(df).transpose()
return df
def read_ts_of_station(self, station) -> pd.DataFrame:
# read a file containing timeseries data for one station
fname = os.path.join(self.data_type_dir,
f'2_timeseries{SEP}{self.time_step}{SEP}ID_{station}.csv')
df = pd.read_csv(fname, sep=';')
if self.time_step == 'daily':
periods = pd.PeriodIndex(year=df["YYYY"], month=df["MM"], day=df["DD"], freq="D")
df.index = periods.to_timestamp()
else:
periods = pd.PeriodIndex(year=df["YYYY"], month=df["MM"], day=df["DD"], hour=df["hh"], minute=df["mm"], freq="H")
df.index = periods.to_timestamp()
# remove the cols specifying index
[df.pop(item) for item in ['YYYY', 'MM', 'DD', 'hh', 'mm'] if item in df]
return df
@property
def start(self):
return "19810101"
@property
def end(self):
return "20191231"
class HYSETS(Camels):
"""
database for hydrometeorological modeling of 14,425 North American watersheds
from 1950-2018 following the work of
[Arsenault et al., 2020](https://doi.org/10.1038/s41597-020-00583-2)
The user must manually download the files, unpack them and provide
the `path` where these files are saved.
    This data comes with multiple sources. Each source has one or more dynamic_features.
Following data_source are available.
|sources | dynamic_features |
|---------------|------------------|
    |SNODAS_SWE | discharge, swe|
|SCDNA | discharge, pr, tasmin, tasmax|
|nonQC_stations | discharge, pr, tasmin, tasmax|
|Livneh | discharge, pr, tasmin, tasmax|
|ERA5 | discharge, pr, tasmax, tasmin|
|ERAS5Land_SWE | discharge, swe|
|ERA5Land | discharge, pr, tasmax, tasmin|
    all sources contain one or more of the following dynamic_features
    with the following shapes
|dynamic_features | shape |
|----------------------------|------------|
|time | (25202,) |
|watershedID | (14425,) |
|drainage_area | (14425,) |
|drainage_area_GSIM | (14425,) |
|flag_GSIM_boundaries | (14425,) |
|flag_artificial_boundaries | (14425,) |
|centroid_lat | (14425,) |
|centroid_lon | (14425,) |
|elevation | (14425,) |
|slope | (14425,) |
|discharge | (14425, 25202) |
|pr | (14425, 25202) |
|tasmax | (14425, 25202) |
|tasmin | (14425, 25202) |
"""
doi = "https://doi.org/10.1038/s41597-020-00583-2"
url = "https://osf.io/rpc3w/"
Q_SRC = ['ERA5', 'ERA5Land', 'ERA5Land_SWE', 'Livneh', 'nonQC_stations', 'SCDNA', 'SNODAS_SWE']
SWE_SRC = ['ERA5Land_SWE', 'SNODAS_SWE']
OTHER_SRC = [src for src in Q_SRC if src not in ['ERA5Land_SWE', 'SNODAS_SWE']]
dynamic_features = ['discharge', 'swe', 'tasmin', 'tasmax', 'pr']
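    # Hedged usage sketch (the local path is an assumption; the data must be downloaded manually):
    #   ds = HYSETS(path='/data/HYSETS', swe_source='SNODAS_SWE', discharge_source='ERA5')
    #   df = ds.fetch(stations=0.01, dynamic_features=['discharge', 'pr'], as_dataframe=True)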
def __init__(self,
path:str,
swe_source:str = "SNODAS_SWE",
discharge_source: str = "ERA5",
tasmin_source: str = "ERA5",
tasmax_source: str = "ERA5",
pr_source: str = "ERA5",
**kwargs
):
"""
Arguments:
path : path where all the data files are saved.
swe_source : source of swe data.
discharge_source : source of discharge data
tasmin_source : source of tasmin data
tasmax_source : source of tasmax data
pr_source : source of pr data
kwargs : arguments for `Camels` base class
"""
assert swe_source in self.SWE_SRC, f'source must be one of {self.SWE_SRC}'
assert discharge_source in self.Q_SRC, f'source must be one of {self.Q_SRC}'
assert tasmin_source in self.OTHER_SRC, f'source must be one of {self.OTHER_SRC}'
assert tasmax_source in self.OTHER_SRC, f'source must be one of {self.OTHER_SRC}'
assert pr_source in self.OTHER_SRC, f'source must be one of {self.OTHER_SRC}'
self.sources = {
'swe': swe_source,
'discharge': discharge_source,
'tasmin': tasmin_source,
'tasmax': tasmax_source,
'pr': pr_source
}
super().__init__(**kwargs)
self.ds_dir = path
fpath = os.path.join(self.ds_dir, 'hysets_dyn.nc')
if not os.path.exists(fpath):
self._maybe_to_netcdf('hysets_dyn')
def _maybe_to_netcdf(self, fname:str):
# todo saving as one file takes very long time
oneD_vars = []
twoD_vars = []
for src in self.Q_SRC:
xds = xr.open_dataset(os.path.join(self.ds_dir, f'HYSETS_2020_{src}.nc'))
for var in xds.variables:
print(f'getting {var} from source {src} ')
if len(xds[var].data.shape) > 1:
xar = xds[var]
xar.name = f"{xar.name}_{src}"
twoD_vars.append(xar)
else:
xar = xds[var]
xar.name = f"{xar.name}_{src}"
oneD_vars.append(xar)
oneD_xds = xr.merge(oneD_vars)
twoD_xds = xr.merge(twoD_vars)
oneD_xds.to_netcdf(os.path.join(self.ds_dir, "hysets_static.nc"))
twoD_xds.to_netcdf(os.path.join(self.ds_dir, "hysets_dyn.nc"))
return
@property
def ds_dir(self):
return self._ds_dir
@ds_dir.setter
def ds_dir(self, x):
sanity_check('HYSETS', x)
self._ds_dir = x
@property
def static_features(self):
df = self.read_static_data()
return df.columns.to_list()
def stations(self) -> list:
return self.read_static_data().index.to_list()
@property
def start(self):
return "19500101"
@property
def end(self):
return "20181231"
def fetch_stations_attributes(self,
stations: list,
dynamic_features: Union[str, list, None] = 'all',
static_features: Union[str, list, None] = None,
st = None,
en = None,
as_dataframe: bool = False,
**kwargs):
stations = check_attributes(stations, self.stations())
stations = [int(stn) for stn in stations]
if dynamic_features is not None:
dyn = self._fetch_dynamic_features(stations=stations,
dynamic_features=dynamic_features,
as_dataframe=as_dataframe,
**kwargs
)
if static_features is not None: # we want both static and dynamic
to_return = {}
static = self._fetch_static_features(station=stations,
static_features=static_features,
**kwargs
)
to_return['static'] = static
to_return['dynamic'] = dyn
else:
to_return = dyn
elif static_features is not None:
# we want only static
to_return = self._fetch_static_features(
station=stations,
static_features=static_features,
**kwargs
)
else:
raise ValueError
return to_return
def fetch_dynamic_features(self,
station,
dynamic_features='all',
st=None,
en=None,
as_dataframe=False):
"""Fetches dynamic attributes of one station."""
station = [int(station)]
return self._fetch_dynamic_features(stations=station,
dynamic_features=dynamic_features,
st=st,
en=en,
as_dataframe=as_dataframe)
def _fetch_dynamic_features(self,
stations:list,
dynamic_features='all',
st=None,
en=None,
as_dataframe=False,
as_ts=False
):
"""Fetches dynamic attributes of station."""
st, en = self._check_length(st, en)
attrs = check_attributes(dynamic_features, self.dynamic_features)
stations = np.subtract(stations, 1).tolist()
# maybe we don't need to read all variables
sources = {k:v for k,v in self.sources.items() if k in attrs}
# original .nc file contains datasets with dynamic and static features as data_vars
# however, for uniformity of this API and easy usage, we want a Dataset to have
# station names/gauge_ids as data_vars and each data_var has
# dimension (time, dynamic_variables)
# Therefore, first read all data for each station from .nc file
# then rearrange it.
# todo, this operation is slower because of `to_dataframe`
# also doing this removes all the metadata
x = {}
f = os.path.join(self.ds_dir, "hysets_dyn.nc")
xds = xr.open_dataset(f)
for stn in stations:
xds1 = xds[[f'{k}_{v}' for k, v in sources.items()]].sel(watershed=stn, time=slice(st, en))
xds1 = xds1.rename_vars({f'{k}_{v}': k for k, v in sources.items()})
x[stn] = xds1.to_dataframe(['time'])
xds = xr.Dataset(x)
xds = xds.rename_dims({'dim_1': 'dynamic_features'})
xds = xds.rename_vars({'dim_1': 'dynamic_features'})
if as_dataframe:
return xds.to_dataframe(['time', 'dynamic_features'])
return xds
def _fetch_static_features(self,
station,
static_features:Union[str, list]='all',
st=None,
en=None,
as_ts=False):
df = self.read_static_data()
static_features = check_attributes(static_features, self.static_features)
if isinstance(station, str):
station = [station]
elif isinstance(station, int):
station = [str(station)]
elif isinstance(station, list):
station = [str(stn) for stn in station]
else:
raise ValueError
return self.to_ts(df.loc[station][static_features], st=st, en=en, as_ts=as_ts)
def fetch_static_features(self,
station,
features='all',
st=None,
en=None,
as_ts=False
)->pd.DataFrame:
return self._fetch_static_features(station, features, st, en, as_ts)
def read_static_data(self):
fname = os.path.join(self.ds_dir, 'HYSETS_watershed_properties.txt')
static_df = pd.read_csv(fname, index_col='Watershed_ID', sep=';')
static_df.index = static_df.index.astype(str)
return static_df
class CAMELS_US(Camels):
"""
Downloads and processes CAMELS dataset of 671 catchments named as CAMELS
from https://ral.ucar.edu/solutions/products/camels
https://doi.org/10.5194/hess-19-209-2015
"""
DATASETS = ['CAMELS_US']
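    # Usage sketch: CAMELS_US(data_source='basin_mean_daymet') downloads the archives on first
    # use and then, for example:
    #   ds.fetch_dynamic_features('01013500', attributes=['prcp(mm/day)', 'Flow'])
    # '01013500' is an illustrative USGS gauge id (assumption), not guaranteed to be present.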
url = "https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/basin_timeseries_v1p2_metForcing_obsFlow.zip"
catchment_attr_url = "https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/camels_attributes_v2.0.zip"
folders = {'basin_mean_daymet': f'basin_mean_forcing{SEP}daymet',
'basin_mean_maurer': f'basin_mean_forcing{SEP}maurer',
'basin_mean_nldas': f'basin_mean_forcing{SEP}nldas',
'basin_mean_v1p15_daymet': f'basin_mean_forcing{SEP}v1p15{SEP}daymet',
'basin_mean_v1p15_nldas': f'basin_mean_forcing{SEP}v1p15{SEP}nldas',
'elev_bands': f'elev{SEP}daymet',
'hru': f'hru_forcing{SEP}daymet'}
dynamic_features = ['dayl(s)', 'prcp(mm/day)', 'srad(W/m2)',
'swe(mm)', 'tmax(C)', 'tmin(C)', 'vp(Pa)', 'Flow']
def __init__(self, data_source='basin_mean_daymet'):
        assert data_source in self.folders, f'allowed data sources are {self.folders.keys()}'
self.data_source = data_source
super().__init__("CAMELS_US")
if os.path.exists(self.ds_dir):
print(f"dataset is already downloaded at {self.ds_dir}")
else:
download(self.url, os.path.join(self.camels_dir, f'CAMELS_US{SEP}CAMELS_US.zip'))
download(self.catchment_attr_url, os.path.join(self.camels_dir, f"CAMELS_US{SEP}catchment_attrs.zip"))
self._unzip()
self.attr_dir = os.path.join(self.ds_dir, f'catchment_attrs{SEP}camels_attributes_v2.0')
self.dataset_dir = os.path.join(self.ds_dir, f'CAMELS_US{SEP}basin_dataset_public_v1p2')
self._maybe_to_netcdf('camels_us_dyn')
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return os.path.join(self.camels_dir, self.name)
@property
def start(self):
return "19800101"
@property
def end(self):
return "20141231"
@property
def static_features(self):
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, 'catchment_attrs', 'camels_attributes_v2.0')}/*.txt")
cols = []
for f in files:
_df = pd.read_csv(f, sep=';', index_col='gauge_id', nrows=1)
cols += list(_df.columns)
else:
df = pd.read_csv(static_fpath, index_col='gauge_id', nrows=1)
cols = list(df.columns)
return cols
def stations(self) -> list:
stns = []
for _dir in os.listdir(os.path.join(self.dataset_dir, 'usgs_streamflow')):
cat = os.path.join(self.dataset_dir, f'usgs_streamflow{SEP}{_dir}')
stns += [fname.split('_')[0] for fname in os.listdir(cat)]
# remove stations for which static values are not available
for stn in ['06775500', '06846500', '09535100']:
stns.remove(stn)
return stns
def _read_dynamic_from_csv(self,
stations,
dynamic_features:Union[str, list]='all',
st=None,
en=None,
):
dyn = {}
for station in stations:
# attributes = check_attributes(dynamic_features, self.dynamic_features)
assert isinstance(station, str)
df = None
df1 = None
dir_name = self.folders[self.data_source]
for cat in os.listdir(os.path.join(self.dataset_dir, dir_name)):
cat_dirs = os.listdir(os.path.join(self.dataset_dir, f'{dir_name}{SEP}{cat}'))
stn_file = f'{station}_lump_cida_forcing_leap.txt'
if stn_file in cat_dirs:
df = pd.read_csv(os.path.join(self.dataset_dir,
f'{dir_name}{SEP}{cat}{SEP}{stn_file}'),
sep="\s+|;|:",
skiprows=4,
engine='python',
names=['Year', 'Mnth', 'Day', 'Hr', 'dayl(s)', 'prcp(mm/day)', 'srad(W/m2)',
'swe(mm)', 'tmax(C)', 'tmin(C)', 'vp(Pa)'],
)
df.index = pd.to_datetime(df['Year'].map(str) + '-' + df['Mnth'].map(str) + '-' + df['Day'].map(str))
flow_dir = os.path.join(self.dataset_dir, 'usgs_streamflow')
for cat in os.listdir(flow_dir):
cat_dirs = os.listdir(os.path.join(flow_dir, cat))
stn_file = f'{station}_streamflow_qc.txt'
if stn_file in cat_dirs:
fpath = os.path.join(flow_dir, f'{cat}{SEP}{stn_file}')
df1 = pd.read_csv(fpath, sep="\s+|;|:'",
names=['station', 'Year', 'Month', 'Day', 'Flow', 'Flag'],
engine='python')
df1.index = pd.to_datetime(
df1['Year'].map(str) + '-' + df1['Month'].map(str) + '-' + df1['Day'].map(str))
out_df = pd.concat([df[['dayl(s)', 'prcp(mm/day)', 'srad(W/m2)', 'swe(mm)', 'tmax(C)', 'tmin(C)', 'vp(Pa)']],
df1['Flow']],
axis=1)
dyn[station] = out_df
return dyn
def fetch_static_features(self, station, features):
attributes = check_attributes(features, self.static_features)
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, 'catchment_attrs', 'camels_attributes_v2.0')}/*.txt")
static_df = pd.DataFrame()
for f in files:
# index should be read as string
idx = pd.read_csv(f, sep=';', usecols=['gauge_id'], dtype=str)
_df = pd.read_csv(f, sep=';', index_col='gauge_id')
_df.index = idx['gauge_id']
static_df = pd.concat([static_df, _df], axis=1)
static_df.to_csv(static_fpath, index_label='gauge_id')
        else:  # index should be read as string because it has leading zeros
idx = pd.read_csv(static_fpath, usecols=['gauge_id'], dtype=str)
static_df = pd.read_csv(static_fpath, index_col='gauge_id')
static_df.index = idx['gauge_id']
static_df.index = static_df.index.astype(str)
df = static_df.loc[station][attributes]
if isinstance(df, pd.Series):
df = pd.DataFrame(df).transpose()
return df
class CAMELS_BR(Camels):
"""
Downloads and processes CAMELS dataset of Brazil
"""
url = "https://zenodo.org/record/3964745#.YA6rUxZS-Uk"
folders = {'streamflow_m3s': '02_CAMELS_BR_streamflow_m3s',
'streamflow_mm': '03_CAMELS_BR_streamflow_mm_selected_catchments',
'simulated_streamflow_m3s': '04_CAMELS_BR_streamflow_simulated',
'precipitation_cpc': '07_CAMELS_BR_precipitation_cpc',
'precipitation_mswep': '06_CAMELS_BR_precipitation_mswep',
'precipitation_chirps': '05_CAMELS_BR_precipitation_chirps',
'evapotransp_gleam': '08_CAMELS_BR_evapotransp_gleam',
'evapotransp_mgb': '09_CAMELS_BR_evapotransp_mgb',
'potential_evapotransp_gleam': '10_CAMELS_BR_potential_evapotransp_gleam',
'temperature_min': '11_CAMELS_BR_temperature_min_cpc',
'temperature_mean': '12_CAMELS_BR_temperature_mean_cpc',
'temperature_max': '13_CAMELS_BR_temperature_max_cpc'
}
def __init__(self):
super().__init__("CAMELS-BR")
self._download()
self._maybe_to_netcdf('camels_dyn_br')
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return os.path.join(self.camels_dir, self.name)
@property
def _all_dirs(self):
"""All the folders in the dataset_directory"""
return [f for f in os.listdir(self.ds_dir) if os.path.isdir(os.path.join(self.ds_dir, f))]
@property
def static_dir(self):
path = None
for _dir in self._all_dirs:
if "attributes" in _dir:
                # supposing that 'attributes' exists in only one file/folder in self.ds_dir
path = os.path.join(self.ds_dir, f'{_dir}{SEP}{_dir}')
return path
@property
def static_files(self):
all_files = None
if self.static_dir is not None:
all_files = glob.glob(f"{self.static_dir}/*.txt")
return all_files
@property
def dynamic_features(self) -> list:
return list(CAMELS_BR.folders.keys())
@property
def static_attribute_categories(self):
static_attrs = []
for f in self.static_files:
ff = str(os.path.basename(f).split('.txt')[0])
static_attrs.append('_'.join(ff.split('_')[2:]))
return static_attrs
@property
def static_features(self):
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, '01_CAMELS_BR_attributes','01_CAMELS_BR_attributes')}/*.txt")
cols = []
for f in files:
_df = pd.read_csv(f, sep=' ', index_col='gauge_id', nrows=1)
cols += list(_df.columns)
else:
df = | pd.read_csv(static_fpath, index_col='gauge_id', nrows=1) | pandas.read_csv |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib_venn import venn2
def load_processed(srcpath, despath):
##Thank You Alessandro for this dictionary.
data = | pd.read_csv(srcpath) | pandas.read_csv |
# Extract the useful data from game files (json)
# Append the useful data to a csv file
import pickle
import os
import queue
import sys
from collections import OrderedDict
import multiprocessing
from multiprocessing.managers import BaseManager, NamespaceProxy
import time
import Modes
import pandas as pd
from collections import Counter
CHUNK_SIZE = 100
def extracted_writer(extracted_file, q, stop):
with open(extracted_file, 'a+') as f:
while not stop.is_set():
try:
game_path = q.get(timeout=1)
except queue.Empty:
continue
f.write(game_path)
f.write('\n')
print('Closing writer', file=sys.stderr)
class Extractor:
def __init__(self, mode, extracted_files, current_index, rot_length, writing_q):
self.mode = mode
self.rot_length = rot_length
self.writing_q = writing_q
self.current_index = current_index
if len(extracted_files) >= self.current_index > 0: # the file already exist
self.csv_file = os.path.join(mode.EXTRACTED_DIR, extracted_files[self.current_index - 1])
self.csv_index = len(pd.read_csv(self.csv_file, skiprows=1))
print(self.csv_file, 'lines', self.csv_index, file=sys.stderr)
else:
self.csv_file = None
self.csv_index = mode.DATA_LINES
class ExManager(BaseManager):
pass
class ExProxy(NamespaceProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__', 'b')
ExManager.register('Extractor', Extractor, ExProxy)
def run(mode, cpu):
extracted_file = mode.EXTRACTED_FILE
if os.path.isfile(extracted_file):
with open(extracted_file, 'r') as f:
extracted_list = [x.strip() for x in f.readlines()]
else:
extracted_list = []
gamePaths = []
for patch in mode.learning_patches:
for region in mode.REGIONS:
if os.path.isdir(os.path.join(mode.DATABASE, 'patches', patch, region)):
gamePaths.extend(
[os.path.join(mode.DATABASE, 'patches', patch, region, f) for f in
os.listdir(os.path.join(mode.DATABASE, 'patches', patch, region))])
print('%d game files found' % len(gamePaths), file=sys.stderr)
gamePaths = list(set(gamePaths) - set(extracted_list))
print('%d new games to extract' % len(gamePaths), file=sys.stderr)
if not os.path.isdir(mode.EXTRACTED_DIR):
os.makedirs(mode.EXTRACTED_DIR)
extracted_files = [f for f in os.listdir(mode.EXTRACTED_DIR)]
l = list(map(lambda x: int(x.replace('data_', '').replace('.csv', '')), extracted_files))
l = sorted(range(len(l)), key=lambda k: l[k])
extracted_files = [extracted_files[k] for k in l]
# multiprocessing
manager = multiprocessing.Manager()
writing_q = manager.Queue()
stop = manager.Event()
writer = multiprocessing.Process(target=extracted_writer, args=(extracted_file, writing_q, stop))
writer.start()
ex_manager = ExManager()
ex_manager.start()
available_extractors = []
running_extractors = []
for i in range(cpu):
current_index = len(extracted_files) - i
# noinspection PyUnresolvedReferences
available_extractors.append(ex_manager.Extractor(mode, extracted_files, current_index, cpu, writing_q))
while gamePaths:
        # we work with chunks in order to save time (no need to hand over the extractor for every single game)
chunk = gamePaths[:CHUNK_SIZE]
gamePaths = gamePaths[CHUNK_SIZE:]
print(len(gamePaths), 'left', file=sys.stderr)
while not available_extractors: # wait until an extractor is available
for p, ex in running_extractors:
if p.is_alive():
continue
available_extractors.append(ex)
running_extractors.remove((p, ex))
if not available_extractors: # wait a bit
time.sleep(0.001)
# start a new job
ex = available_extractors.pop()
p = multiprocessing.Process(target=analyze_game, args=(ex, chunk,))
running_extractors.append((p, ex))
p.start()
for p, ex in running_extractors:
p.join()
stop.set()
writer.join()
print('-- Extraction complete --')
def analyze_game(ex, gamePaths):
for gamePath in gamePaths:
raw_data = OrderedDict([('s_' + champ, []) for champ in ex.mode.CHAMPIONS_LABEL] + [('p_' + champ, []) for champ in ex.mode.CHAMPIONS_LABEL])
raw_data['patch'] = []
raw_data['win'] = []
raw_data['file'] = []
print(ex.csv_file, gamePath)
game = pickle.load(open(gamePath, 'rb'))
bans = []
game_patch = '_'.join(game['gameVersion'].split('.')[:2])
if game['gameDuration'] < 300:
print(gamePath, 'FF afk', game['gameDuration'], file=sys.stderr)
ex.writing_q.put(gamePath)
continue
blueTeam = None
redTeam = None
for team in game['teams']:
if team['teamId'] == 100:
blueTeam = team
elif team['teamId'] == 200:
redTeam = team
else:
print(gamePath, 'Unrecognized team %d' % team['teamId'], file=sys.stderr)
break
for ban in team['bans']:
championId = ban['championId']
if championId not in bans:
bans.append(championId)
if not blueTeam or not redTeam:
print(gamePath, 'Teams are not recognized', file=sys.stderr)
ex.writing_q.put(gamePath)
continue
# not sure what is written for voided games, so it's safer to check both
        # if we get something other than true/false or false/true we just ignore the file
blueWin = blueTeam['win'] == 'Win'
redWin = redTeam['win'] == 'Win'
if not blueWin ^ redWin:
print(gamePath, 'No winner found', blueWin, redWin, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
participants = game['participants']
# Blank, everything is available
state = OrderedDict()
state['win'] = int(blueWin)
state['patch'] = game_patch
state['file'] = os.path.basename(gamePath)
state.update([('s_' + champ_name, 'A') for champ_name in ex.mode.CHAMPIONS_LABEL]) # Status
state.update([('p_' + champ_name, 'N') for champ_name in ex.mode.CHAMPIONS_LABEL]) # Position
for key, value in state.items():
raw_data[key].append(value)
# Bans
state = OrderedDict(state) # don't forget to create a clean copy
for championId in bans:
for champ_name, champ_id in ex.mode.CHAMPIONS_ID.items():
if champ_id == championId:
state['s_' + champ_name] = 'N' # None
break
for key, value in state.items():
raw_data[key].append(value)
# Smart lane-role
        # The API doesn't precisely give players' roles, so we have to deduce them
b_roles = OrderedDict()
r_roles = OrderedDict()
for i in range(0, 10):
p = participants[i]
lane = p['timeline']['lane']
if i < 5:
if lane == 'TOP':
b_roles[i] = 'T'
elif lane == 'JUNGLE':
b_roles[i] = 'J'
elif lane == 'MIDDLE':
b_roles[i] = 'M'
elif lane == 'BOTTOM':
b_roles[i] = 'C'
elif lane == 'NONE':
b_roles[i] = '?' # Fill missing lane if possible
else:
raise Exception(p, lane)
else:
if lane == 'TOP':
r_roles[i] = 'T'
elif lane == 'JUNGLE':
r_roles[i] = 'J'
elif lane == 'MIDDLE':
r_roles[i] = 'M'
elif lane == 'BOTTOM':
r_roles[i] = 'C'
elif lane == 'NONE':
r_roles[i] = '?' # Fill missing lane if possible
else:
raise Exception(p, lane)
# Fill missing role '?'
# target at this point is something like 'T', 'J', 'M', 'C', 'C'
b_toFillCount = Counter(b_roles.values())['?']
if b_toFillCount > 1:
print(gamePath, 'fucked up roles', b_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
elif b_toFillCount == 1:
fill_index = list(b_roles.keys())[list(b_roles.values()).index('?')]
possible_roles = ['T', 'J', 'M', 'C']
missing_roles = list(set(possible_roles)-set(b_roles.values()))
if len(missing_roles) == 1:
# non-bot role
b_roles[fill_index] = missing_roles[0]
elif len(missing_roles) == 0:
# bot, whether it is support will be determined later
b_roles[fill_index] = 'C'
else:
print(gamePath, 'fucked up roles', b_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
r_toFillCount = Counter(r_roles.values())['?']
if r_toFillCount > 1:
print(gamePath, 'fucked up roles', r_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
elif r_toFillCount == 1:
fill_index = list(r_roles.keys())[list(r_roles.values()).index('?')]
possible_roles = ['T', 'J', 'M', 'C']
missing_roles = list(set(possible_roles)-set(r_roles.values()))
if len(missing_roles) == 1:
# non-bot role
r_roles[fill_index] = missing_roles[0]
elif len(missing_roles) == 0:
# bot, whether it is support will be determined later
r_roles[fill_index] = 'C'
else:
print(gamePath, 'fucked up roles', r_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
        # need to find the support in both teams
# a lane will appear twice, most likely 'C'
# the support will either be tagged as 'SUPPORT' or have a low cs count
b_doubleRole = Counter(b_roles.values()).most_common(1)[0][0]
b_doublei = [i for i, r in b_roles.items() if r == b_doubleRole]
if len(b_doublei) > 2:
print(gamePath, 'fucked up roles', b_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
if 'SUPPORT' in participants[b_doublei[0]]['timeline']['role']:
b_roles[b_doublei[0]] = 'S'
elif 'SUPPORT' in participants[b_doublei[1]]['timeline']['role']:
b_roles[b_doublei[1]] = 'S'
else: # Last resort -> check cs
if 'creepsPerMinDeltas' in participants[b_doublei[0]]['timeline']:
if participants[b_doublei[0]]['timeline']['creepsPerMinDeltas']['0-10'] < \
participants[b_doublei[1]]['timeline']['creepsPerMinDeltas']['0-10']:
b_roles[b_doublei[0]] = 'S'
else:
b_roles[b_doublei[1]] = 'S'
else:
if participants[b_doublei[0]]['stats']['totalMinionsKilled'] < participants[b_doublei[1]]['stats']['totalMinionsKilled']:
b_roles[b_doublei[0]] = 'S'
else:
b_roles[b_doublei[1]] = 'S'
r_doubleRole = Counter(r_roles.values()).most_common(1)[0][0]
r_doublei = [i for i, r in r_roles.items() if r == r_doubleRole]
if len(r_doublei) > 2:
print(gamePath, 'fucked up roles', r_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
if 'SUPPORT' in participants[r_doublei[0]]['timeline']['role']:
r_roles[r_doublei[0]] = 'S'
elif 'SUPPORT' in participants[r_doublei[1]]['timeline']['role']:
r_roles[r_doublei[1]] = 'S'
else: # Last resort -> check cs
if 'creepsPerMinDeltas' in participants[r_doublei[0]]['timeline']:
if participants[r_doublei[0]]['timeline']['creepsPerMinDeltas']['0-10'] < \
participants[r_doublei[1]]['timeline']['creepsPerMinDeltas']['0-10']:
r_roles[r_doublei[0]] = 'S'
else:
r_roles[r_doublei[1]] = 'S'
else:
if participants[r_doublei[0]]['stats']['totalMinionsKilled'] < participants[r_doublei[1]]['stats']['totalMinionsKilled']:
r_roles[r_doublei[0]] = 'S'
else:
r_roles[r_doublei[1]] = 'S'
roles = OrderedDict()
roles.update(b_roles)
roles.update(r_roles)
# Draft
DRAFT_ORDER = [0, 5, 6, 1, 2, 7, 8, 3, 4, 9] # This is not exact. This order is not pick order but end-draft order: if some players
        # trade, this order is wrong. Unfortunately there is no way to know the real pick order. So we just assume people don't trade often and
# that trading does not have a huge impact anyway.
for i in DRAFT_ORDER:
state = OrderedDict(state)
bluePick = i < 5
p = participants[i]
championId = p['championId']
for champ_name, champ_id in ex.mode.CHAMPIONS_ID.items():
if champ_id == championId:
state['s_' + champ_name] = 'B' if bluePick else 'R'
state['p_' + champ_name] = roles[i]
break
for key, value in state.items():
raw_data[key].append(value)
df = | pd.DataFrame(raw_data, columns=ex.mode.COLUMNS) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
PEP 8 -- Style Guide for Python Code
https://www.python.org/dev/peps/pep-0008/
@author: <NAME>
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def GPR(X_train,y_train,X_val,r2,s2):
""" Estimates the output y_val given the input X_val, using the training data
and hyperparameters r2 and s2"""
Nva=X_val.shape[0]
yhat_val=np.zeros((Nva,))
sigmahat_val=np.zeros((Nva,))
for k in range(Nva):
x=X_val[k,:]# k-th point in the validation dataset
A=X_train-np.ones((Ntr,1))*x
dist2=np.sum(A**2,axis=1)
ii=np.argsort(dist2)
ii=ii[0:N-1];
refX=X_train[ii,:]
Z=np.vstack((refX,x))
sc=np.dot(Z,Z.T)# dot products
e=np.diagonal(sc).reshape(N,1)# square norms
D=e+e.T-2*sc# matrix with the square distances
R_N=np.exp(-D/2/r2)+s2*np.identity(N)#covariance matrix
R_Nm1=R_N[0:N-1,0:N-1]#(N-1)x(N-1) submatrix
K=R_N[0:N-1,N-1]# (N-1)x1 column
d=R_N[N-1,N-1]# scalar value
C=np.linalg.inv(R_Nm1)
refY=y_train[ii]
mu=K.T@C@refY# estimation of y_val for X_val[k,:]
sigma2=d-K.T@C@K
sigmahat_val[k]=np.sqrt(sigma2)
yhat_val[k]=mu
return yhat_val,sigmahat_val
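# Note: GPR() relies on the module-level globals Ntr (number of training points) and N
# (neighbourhood size used to build the local covariance matrix), which are defined elsewhere
# in this script. A hedged usage sketch, with illustrative hyperparameter values only:
#   Ntr, N = X_train.shape[0], 10
#   yhat_val, sigmahat_val = GPR(X_train, y_train, X_val, r2=3, s2=1e-3)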
def print_result(title,e,y_norm,sy,my):
"""
print_result prints a dataframe containing parameters: mean, standard
deviation, mean squared value, parameter R2 of the error in each of the
three subsets
Parameters
----------
    title: typically the algorithm used to find the regressand
string
e: Nd array containing the errors for training, validation and test datasets
float
y_norm: Nd array containing normalized regressand values for training, validation and test dataset
float
sy: standard deviation of regressand train
float
my: mean of regressand train
float
"""
E_tr=e[0]
E_va=e[1]
E_te=e[2]
y_tr_norm=y_norm[0]
y_va_norm=y_norm[1]
y_te_norm=y_norm[2]
E_tr_mu=E_tr.mean()
E_tr_sig=E_tr.std()
E_tr_MSE=np.mean(E_tr**2)
y_tr=y_tr_norm*sy+my
R2_tr=1-E_tr_sig**2/np.mean(y_tr**2)
E_va_mu=E_va.mean()
E_va_sig=E_va.std()
E_va_MSE=np.mean(E_va**2)
y_va=y_va_norm*sy+my
R2_va=1-E_va_sig**2/np.mean(y_va**2)
E_te_mu=E_te.mean()
E_te_sig=E_te.std()
E_te_MSE=np.mean(E_te**2)
y_te=y_te_norm*sy+my
R2_te=1-E_te_sig**2/np.mean(y_te**2)
rows=['Training','Validation','Test']
cols=['mean','std','MSE','R^2']
p=np.array([[E_tr_mu,E_tr_sig,E_tr_MSE,R2_tr],
[E_va_mu,E_va_sig,E_va_MSE,R2_va],
[E_te_mu,E_te_sig,E_te_MSE,R2_te]])
results=pd.DataFrame(p,columns=cols,index=rows)
print('\n Results for ' + title)
print(results)
plt.close('all')
xx= | pd.read_csv("./data/parkinsons_updrs.csv") | pandas.read_csv |
import warnings
from datetime import datetime
import json
import logging
from time import sleep
import uuid
import time
import sys
import numpy as np
from distutils.version import StrictVersion
from pandas import compat
from pandas.core.api import DataFrame
from pandas.tools.merge import concat
from pandas.core.common import PandasError
from pandas.compat import lzip, bytes_to_str
def _check_google_client_version():
try:
import pkg_resources
except ImportError:
raise ImportError('Could not import pkg_resources (setuptools).')
if compat.PY3:
google_api_minimum_version = '1.4.1'
else:
google_api_minimum_version = '1.2.0'
_GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution(
'google-api-python-client').version
if (StrictVersion(_GOOGLE_API_CLIENT_VERSION) <
StrictVersion(google_api_minimum_version)):
raise ImportError("pandas requires google-api-python-client >= {0} "
"for Google BigQuery support, "
"current version {1}"
.format(google_api_minimum_version,
_GOOGLE_API_CLIENT_VERSION))
def _test_google_api_imports():
try:
import httplib2 # noqa
from apiclient.discovery import build # noqa
from apiclient.errors import HttpError # noqa
from oauth2client.client import AccessTokenRefreshError # noqa
from oauth2client.client import OAuth2WebServerFlow # noqa
from oauth2client.client import SignedJwtAssertionCredentials # noqa
from oauth2client.file import Storage # noqa
from oauth2client.tools import run_flow, argparser # noqa
except ImportError as e:
raise ImportError("Missing module required for Google BigQuery "
"support: {0}".format(str(e)))
logger = logging.getLogger('pandas.io.gbq')
logger.setLevel(logging.ERROR)
class InvalidPrivateKeyFormat(PandasError, ValueError):
"""
Raised when provided private key has invalid format.
"""
pass
class AccessDenied(PandasError, ValueError):
"""
Raised when invalid credentials are provided, or tokens have expired.
"""
pass
class DatasetCreationError(PandasError, ValueError):
"""
Raised when the create dataset method fails
"""
pass
class GenericGBQException(PandasError, ValueError):
"""
Raised when an unrecognized Google API Error occurs.
"""
pass
class InvalidColumnOrder(PandasError, ValueError):
"""
Raised when the provided column order for output
results DataFrame does not match the schema
returned by BigQuery.
"""
pass
class InvalidPageToken(PandasError, ValueError):
"""
Raised when Google BigQuery fails to return,
or returns a duplicate page token.
"""
pass
class InvalidSchema(PandasError, ValueError):
"""
Raised when the provided DataFrame does
not match the schema of the destination
table in BigQuery.
"""
pass
class NotFoundException(PandasError, ValueError):
"""
Raised when the project_id, table or dataset provided in the query could
not be found.
"""
pass
class StreamingInsertError(PandasError, ValueError):
"""
Raised when BigQuery reports a streaming insert error.
For more information see `Streaming Data Into BigQuery
<https://cloud.google.com/bigquery/streaming-data-into-bigquery>`__
"""
class TableCreationError(PandasError, ValueError):
"""
Raised when the create table method fails
"""
pass
class GbqConnector(object):
scope = 'https://www.googleapis.com/auth/bigquery'
def __init__(self, project_id, reauth=False, verbose=False,
private_key=None):
_check_google_client_version()
_test_google_api_imports()
self.project_id = project_id
self.reauth = reauth
self.verbose = verbose
self.private_key = private_key
self.credentials = self.get_credentials()
self.service = self.get_service()
def get_credentials(self):
if self.private_key:
return self.get_service_account_credentials()
else:
return self.get_user_account_credentials()
def get_user_account_credentials(self):
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from oauth2client.tools import run_flow, argparser
flow = OAuth2WebServerFlow(
client_id=('495642085510-k0tmvj2m941jhre2nbqka17vqpjfddtd'
'.apps.googleusercontent.com'),
client_secret='<KEY>',
scope=self.scope,
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
storage = Storage('bigquery_credentials.dat')
credentials = storage.get()
if credentials is None or credentials.invalid or self.reauth:
credentials = run_flow(flow, storage, argparser.parse_args([]))
return credentials
def get_service_account_credentials(self):
from oauth2client.client import SignedJwtAssertionCredentials
from os.path import isfile
try:
if isfile(self.private_key):
with open(self.private_key) as f:
json_key = json.loads(f.read())
else:
# ugly hack: 'private_key' field has new lines inside,
# they break json parser, but we need to preserve them
json_key = json.loads(self.private_key.replace('\n', ' '))
json_key['private_key'] = json_key['private_key'].replace(
' ', '\n')
if compat.PY3:
json_key['private_key'] = bytes(
json_key['private_key'], 'UTF-8')
return SignedJwtAssertionCredentials(
json_key['client_email'],
json_key['private_key'],
self.scope,
)
except (KeyError, ValueError, TypeError, AttributeError):
raise InvalidPrivateKeyFormat(
"Private key is missing or invalid. It should be service "
"account private key JSON (file path or string contents) "
"with at least two keys: 'client_email' and 'private_key'. "
"Can be obtained from: https://console.developers.google."
"com/permissions/serviceaccounts")
def _print(self, msg, end='\n'):
if self.verbose:
sys.stdout.write(msg + end)
sys.stdout.flush()
def _start_timer(self):
self.start = time.time()
def get_elapsed_seconds(self):
return round(time.time() - self.start, 2)
def print_elapsed_seconds(self, prefix='Elapsed', postfix='s.',
overlong=7):
sec = self.get_elapsed_seconds()
if sec > overlong:
self._print('{} {} {}'.format(prefix, sec, postfix))
# http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
@staticmethod
def sizeof_fmt(num, suffix='b'):
fmt = "%3.1f %s%s"
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return fmt % (num, unit, suffix)
num /= 1024.0
return fmt % (num, 'Y', suffix)
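    # Worked example for sizeof_fmt: sizeof_fmt(1500000) -> '1.4 Mb',
    # since 1500000 / 1024**2 is roughly 1.43.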
def get_service(self):
import httplib2
from apiclient.discovery import build
http = httplib2.Http()
http = self.credentials.authorize(http)
bigquery_service = build('bigquery', 'v2', http=http)
return bigquery_service
@staticmethod
def process_http_error(ex):
# See `BigQuery Troubleshooting Errors
# <https://cloud.google.com/bigquery/troubleshooting-errors>`__
status = json.loads(bytes_to_str(ex.content))['error']
errors = status.get('errors', None)
if errors:
for error in errors:
reason = error['reason']
message = error['message']
raise GenericGBQException(
"Reason: {0}, Message: {1}".format(reason, message))
raise GenericGBQException(errors)
def process_insert_errors(self, insert_errors):
for insert_error in insert_errors:
row = insert_error['index']
errors = insert_error.get('errors', None)
for error in errors:
reason = error['reason']
message = error['message']
location = error['location']
error_message = ('Error at Row: {0}, Reason: {1}, '
'Location: {2}, Message: {3}'
.format(row, reason, location, message))
# Report all error messages if verbose is set
if self.verbose:
self._print(error_message)
else:
raise StreamingInsertError(error_message +
'\nEnable verbose logging to '
'see all errors')
raise StreamingInsertError
def run_query(self, query):
from apiclient.errors import HttpError
from oauth2client.client import AccessTokenRefreshError
_check_google_client_version()
job_collection = self.service.jobs()
job_data = {
'configuration': {
'query': {
'query': query
# 'allowLargeResults', 'createDisposition',
# 'preserveNulls', destinationTable, useQueryCache
}
}
}
self._start_timer()
try:
self._print('Requesting query... ', end="")
query_reply = job_collection.insert(
projectId=self.project_id, body=job_data).execute()
self._print('ok.\nQuery running...')
except (AccessTokenRefreshError, ValueError):
if self.private_key:
raise AccessDenied(
"The service account credentials are not valid")
else:
raise AccessDenied(
"The credentials have been revoked or expired, "
"please re-run the application to re-authorize")
except HttpError as ex:
self.process_http_error(ex)
job_reference = query_reply['jobReference']
while not query_reply.get('jobComplete', False):
self.print_elapsed_seconds(' Elapsed', 's. Waiting...')
try:
query_reply = job_collection.getQueryResults(
projectId=job_reference['projectId'],
jobId=job_reference['jobId']).execute()
except HttpError as ex:
self.process_http_error(ex)
if self.verbose:
if query_reply['cacheHit']:
self._print('Query done.\nCache hit.\n')
else:
bytes_processed = int(query_reply.get(
'totalBytesProcessed', '0'))
self._print('Query done.\nProcessed: {}\n'.format(
self.sizeof_fmt(bytes_processed)))
self._print('Retrieving results...')
total_rows = int(query_reply['totalRows'])
result_pages = list()
seen_page_tokens = list()
current_row = 0
# Only read schema on first page
schema = query_reply['schema']
# Loop through each page of data
while 'rows' in query_reply and current_row < total_rows:
page = query_reply['rows']
result_pages.append(page)
current_row += len(page)
self.print_elapsed_seconds(
' Got page: {}; {}% done. Elapsed'.format(
len(result_pages),
round(100.0 * current_row / total_rows)))
if current_row == total_rows:
break
page_token = query_reply.get('pageToken', None)
if not page_token and current_row < total_rows:
raise InvalidPageToken("Required pageToken was missing. "
"Received {0} of {1} rows"
.format(current_row, total_rows))
elif page_token in seen_page_tokens:
raise InvalidPageToken("A duplicate pageToken was returned")
seen_page_tokens.append(page_token)
try:
query_reply = job_collection.getQueryResults(
projectId=job_reference['projectId'],
jobId=job_reference['jobId'],
pageToken=page_token).execute()
except HttpError as ex:
self.process_http_error(ex)
if current_row < total_rows:
raise InvalidPageToken()
# print basic query stats
self._print('Got {} rows.\n'.format(total_rows))
return schema, result_pages
def load_data(self, dataframe, dataset_id, table_id, chunksize):
from apiclient.errors import HttpError
job_id = uuid.uuid4().hex
rows = []
remaining_rows = len(dataframe)
if self.verbose:
total_rows = remaining_rows
self._print("\n\n")
for index, row in dataframe.reset_index(drop=True).iterrows():
row_dict = dict()
row_dict['json'] = json.loads(row.to_json(force_ascii=False,
date_unit='s',
date_format='iso'))
row_dict['insertId'] = job_id + str(index)
rows.append(row_dict)
remaining_rows -= 1
if (len(rows) % chunksize == 0) or (remaining_rows == 0):
self._print("\rStreaming Insert is {0}% Complete".format(
((total_rows - remaining_rows) * 100) / total_rows))
body = {'rows': rows}
try:
response = self.service.tabledata().insertAll(
projectId=self.project_id,
datasetId=dataset_id,
tableId=table_id,
body=body).execute()
except HttpError as ex:
self.process_http_error(ex)
# For streaming inserts, even if you receive a success HTTP
# response code, you'll need to check the insertErrors property
# of the response to determine if the row insertions were
# successful, because it's possible that BigQuery was only
# partially successful at inserting the rows. See the `Success
# HTTP Response Codes
# <https://cloud.google.com/bigquery/
# streaming-data-into-bigquery#troubleshooting>`__
# section
insert_errors = response.get('insertErrors', None)
if insert_errors:
self.process_insert_errors(insert_errors)
sleep(1) # Maintains the inserts "per second" rate per API
rows = []
self._print("\n")
def verify_schema(self, dataset_id, table_id, schema):
from apiclient.errors import HttpError
try:
return (self.service.tables().get(
projectId=self.project_id,
datasetId=dataset_id,
tableId=table_id
).execute()['schema']) == schema
except HttpError as ex:
self.process_http_error(ex)
def delete_and_recreate_table(self, dataset_id, table_id, table_schema):
delay = 0
# Changes to table schema may take up to 2 minutes as of May 2015 See
# `Issue 191
# <https://code.google.com/p/google-bigquery/issues/detail?id=191>`__
# Compare previous schema with new schema to determine if there should
# be a 120 second delay
if not self.verify_schema(dataset_id, table_id, table_schema):
self._print('The existing table has a different schema. Please '
'wait 2 minutes. See Google BigQuery issue #191')
delay = 120
table = _Table(self.project_id, dataset_id,
private_key=self.private_key)
table.delete(table_id)
table.create(table_id, table_schema)
sleep(delay)
def _parse_data(schema, rows):
# see:
# http://pandas.pydata.org/pandas-docs/dev/missing_data.html
# #missing-data-casting-rules-and-indexing
dtype_map = {'INTEGER': np.dtype(float),
'FLOAT': np.dtype(float),
# This seems to be buggy without nanosecond indicator
'TIMESTAMP': 'M8[ns]'}
fields = schema['fields']
col_types = [field['type'] for field in fields]
col_names = [str(field['name']) for field in fields]
col_dtypes = [dtype_map.get(field['type'], object) for field in fields]
page_array = np.zeros((len(rows),),
dtype= | lzip(col_names, col_dtypes) | pandas.compat.lzip |
# -*- coding: utf-8 -*-
import sys
import numpy as np
import pandas as pd
import pytest
from pandas import DataFrame
from pandas.util import testing as tm
class TestToCSV(object):
@pytest.mark.xfail((3, 6, 5) > sys.version_info >= (3, 5),
reason=("Python csv library bug "
"(see https://bugs.python.org/issue32255)"))
def test_to_csv_with_single_column(self):
# see gh-18676, https://bugs.python.org/issue32255
#
# Python's CSV library adds an extraneous '""'
# before the newline when the NaN-value is in
# the first row. Otherwise, only the newline
# character is added. This behavior is inconsistent
# and was patched in https://bugs.python.org/pull_request4672.
df1 = DataFrame([None, 1])
expected1 = """\
""
1.0
"""
with tm.ensure_clean('test.csv') as path:
df1.to_csv(path, header=None, index=None)
with open(path, 'r') as f:
assert f.read() == expected1
df2 = DataFrame([1, None])
expected2 = """\
1.0
""
"""
with tm.ensure_clean('test.csv') as path:
df2.to_csv(path, header=None, index=None)
with open(path, 'r') as f:
assert f.read() == expected2
    def test_to_csv_default_encoding(self):
# GH17097
df = DataFrame({'col': [u"AAAAA", u"ÄÄÄÄÄ", u"ßßßßß", u"聞聞聞聞聞"]})
with tm.ensure_clean('test.csv') as path:
            # the default to_csv encoding in Python 2 is ascii, and in
            # Python 3 it is utf-8.
if pd.compat.PY2:
# the encoding argument parameter should be utf-8
with tm.assert_raises_regex(UnicodeEncodeError, 'ascii'):
df.to_csv(path)
else:
df.to_csv(path)
tm.assert_frame_equal(pd.read_csv(path, index_col=0), df)
def test_to_csv_quotechar(self):
df = DataFrame({'col': [1, 2]})
expected = """\
"","col"
"0","1"
"1","2"
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1) # 1=QUOTE_ALL
with open(path, 'r') as f:
assert f.read() == expected
expected = """\
$$,$col$
$0$,$1$
$1$,$2$
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, quotechar="$")
with open(path, 'r') as f:
assert f.read() == expected
with tm.ensure_clean('test.csv') as path:
with tm.assert_raises_regex(TypeError, 'quotechar'):
df.to_csv(path, quoting=1, quotechar=None)
def test_to_csv_doublequote(self):
df = DataFrame({'col': ['a"a', '"bb"']})
expected = '''\
"","col"
"0","a""a"
"1","""bb"""
'''
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, doublequote=True) # QUOTE_ALL
with open(path, 'r') as f:
assert f.read() == expected
from _csv import Error
with tm.ensure_clean('test.csv') as path:
with tm.assert_raises_regex(Error, 'escapechar'):
df.to_csv(path, doublequote=False) # no escapechar set
def test_to_csv_escapechar(self):
df = DataFrame({'col': ['a"a', '"bb"']})
expected = '''\
"","col"
"0","a\\"a"
"1","\\"bb\\""
'''
with tm.ensure_clean('test.csv') as path: # QUOTE_ALL
df.to_csv(path, quoting=1, doublequote=False, escapechar='\\')
with open(path, 'r') as f:
assert f.read() == expected
df = DataFrame({'col': ['a,a', ',bb,']})
expected = """\
,col
0,a\\,a
1,\\,bb\\,
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=3, escapechar='\\') # QUOTE_NONE
with open(path, 'r') as f:
assert f.read() == expected
def test_csv_to_string(self):
df = DataFrame({'col': [1, 2]})
expected = ',col\n0,1\n1,2\n'
assert df.to_csv() == expected
def test_to_csv_decimal(self):
# GH 781
df = DataFrame({'col1': [1], 'col2': ['a'], 'col3': [10.1]})
expected_default = ',col1,col2,col3\n0,1,a,10.1\n'
assert df.to_csv() == expected_default
expected_european_excel = ';col1;col2;col3\n0;1;a;10,1\n'
assert df.to_csv(decimal=',', sep=';') == expected_european_excel
expected_float_format_default = ',col1,col2,col3\n0,1,a,10.10\n'
assert df.to_csv(float_format='%.2f') == expected_float_format_default
expected_float_format = ';col1;col2;col3\n0;1;a;10,10\n'
assert df.to_csv(decimal=',', sep=';',
float_format='%.2f') == expected_float_format
# GH 11553: testing if decimal is taken into account for '0.0'
df = pd.DataFrame({'a': [0, 1.1], 'b': [2.2, 3.3], 'c': 1})
expected = 'a,b,c\n0^0,2^2,1\n1^1,3^3,1\n'
assert df.to_csv(index=False, decimal='^') == expected
# same but for an index
assert df.set_index('a').to_csv(decimal='^') == expected
# same for a multi-index
assert df.set_index(['a', 'b']).to_csv(decimal="^") == expected
def test_to_csv_float_format(self):
# testing if float_format is taken into account for the index
# GH 11553
df = pd.DataFrame({'a': [0, 1], 'b': [2.2, 3.3], 'c': 1})
expected = 'a,b,c\n0,2.20,1\n1,3.30,1\n'
assert df.set_index('a').to_csv(float_format='%.2f') == expected
# same for a multi-index
assert df.set_index(['a', 'b']).to_csv(
float_format='%.2f') == expected
def test_to_csv_na_rep(self):
# testing if NaN values are correctly represented in the index
# GH 11553
df = DataFrame({'a': [0, np.NaN], 'b': [0, 1], 'c': [2, 3]})
expected = "a,b,c\n0.0,0,2\n_,1,3\n"
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
# now with an index containing only NaNs
df = | DataFrame({'a': np.NaN, 'b': [0, 1], 'c': [2, 3]}) | pandas.DataFrame |
from time import time
from xml.etree import ElementTree
import pandas as pd
import os.path
import logging
import sys
from tqdm import tqdm
class Mzid(object):
"""
This class holds the loaded mzIdentml object and extracts peptides and protein labels.
    The goal is a dataframe in which each identified peptide sequence/charge combination is linked to its m/z, protein, and spectrum ID.
To do so, first pull 5 tables from the mzID file: 1.PSM, 2.Peptide, 3.ProteinGroup, 4.PeptideEvidence, 5.DBSequence
1. PSM contains spectrum_ID, mz, z passThreshold; references to Peptide and PeptideEvidence through Pep_ref & PE_ref
2. Peptide contains the actual amino acid sequence
3. ProteinGroup contains passThreshold, references to DBSequence and PeptideEvidence through DBS_ref and PE_Ref
4. PeptideEvidence contains isDecoy, references to Peptide and DBSequence through Pep_ref and DBS_ref
5. DBSequence contains the Uniprot accession number
From these five tables, output a peptide-centric summary, and a protein-centric summary
Peptide-centric Summary combines PSM and Peptide, contains Sequence, mz, z, spectrumID
Protein-centric Summary reads all 5 tables, should contain Uniprot in addition to peptide-centric summary
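    A minimal usage sketch (illustrative only; the reader method names follow the
    commented hints in __init__ below and may differ in the full class):

        mzid = Mzid('~/Desktop/example.mzid')
        mzid.psm_df = mzid.read_psm()
        mzid.peptide_df = mzid.read_peptide()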
"""
def __init__(self, path):
"""
:param path: path of the mzid file to be loaded, e.g., "~/Desktop/example.mzid"
"""
self.logger = logging.getLogger('mzid.mzid')
self.path = os.path.join(path)
self.root = self._parse_file()
self.psm_df = pd.DataFrame() #self.read_psm()
self.peptide_df = pd.DataFrame() #self.read_peptide()
self.protein_df = pd.DataFrame() #self.read_protein()
self.pe_df = pd.DataFrame() #self.read_pe()
self.dbs_df = pd.DataFrame() #self.read_dbs()
self.pep_summary_df = | pd.DataFrame() | pandas.DataFrame |
# Import libraries
import streamlit as st
import pandas as pd
import yfinance as yf
import talib
import plotly.graph_objects as go
import plotly.express as px
import requests
import seaborn as sns
import os
from streamlit_lottie import st_lottie
from sqlalchemy import create_engine
# Import my custom scripts
from patterns import candlestick_patterns
from OptimizePortfolio import optimize_portfolio, calculate_portfolio, getCompanyName
from chart import areaChart, candlestickChart, gaugeChart, pieChart, fundamentalChart
from scan import scanStocks
from db import config
import user
db_string = f"cockroachdb://{config.username}:{config.password}@{config.host}:{config.port}/{config.cluster}.{config.db_name}?sslmode=require"
db = create_engine(db_string)
def calculateSMA(df, window):
df[f'{window}sma'] = df['Close'].rolling(window=window).mean()
def calculateEMA(df, window):
df[f'{window}ema'] = df['Close'].ewm(span=window).mean()
def load_lottieurl(url: str):
r = requests.get(url)
if r.status_code != 200:
return None
return r.json()
def main():
st.title('Stock Tracker')
functionality = st.sidebar.selectbox('What would you like to do?',
('Track Individual Stocks', 'Compare Company Fundamentals', 'Optimize my Portfolio',
'Find Candlestick Patterns', 'Scan for Candlestick Patterns'))
if (functionality == 'Track Individual Stocks'):
st.header('Track Individual Stocks')
ticker = st.sidebar.text_input('Enter ticker symbol', value='AMD')
companyName = getCompanyName(ticker)
df = user.get_db_price(ticker, db)
st.subheader(f'Real-time information for {companyName}')
type = st.sidebar.selectbox('Choose Chart Type', ('Line Chart', 'Candlestick Chart'))
if (type == 'Line Chart'):
plot = areaChart(df, ticker)
st.plotly_chart(plot)
else:
plot = candlestickChart(df, ticker)
st.plotly_chart(plot)
with st.beta_expander("What is a candlestick chart?"):
st.write("""A daily candlestick shows the market's open, high, low, and close price for the day.
When the body of the candlestick is green, it means the close was higher than the open (ie. the price increased).
If the body is red, it means the close was lower than the open (ie. the price decreased).""")
st.image("https://upload.wikimedia.org/wikipedia/commons/e/ea/Candlestick_chart_scheme_03-en.svg", use_column_width="auto")
st.write("Probe-meteo.com, CC BY-SA 3.0 <https://creativecommons.org/licenses/by-sa/3.0>, via Wikimedia Commons")
gauge = gaugeChart(df, ticker)
st.plotly_chart(gauge)
st.subheader(f"Fundamental Analysis of {companyName}")
with st.beta_expander("What is Fundamental Analysis?"):
st.write("""Fundamental analysis (FA) is a method of **measuring a security's intrinsic value**
            by examining related economic and financial factors. These factors range from macroeconomic
factors such as the state of the economy and industry conditions to microeconomic factors
like the effectiveness of the company's management. The **end goal** is to arrive at a number
that an investor can compare with a security's current price **in order to see whether the
security is undervalued or overvalued.**""")
info = user.get_db_fundamentals(ticker, db)
st.write(f"**_Business Summary_**: {info['longBusinessSummary'].values[0]}")
st.write(f"**_Sector_**: {info['sector'].values[0]}")
st.write(f"**_Shares Outstanding_**: {info['sharesOutstanding'].values[0]}")
with st.beta_expander("Shares Outstanding"):
st.write("""Shares outstanding refer to a company's stock currently held by all its
shareholders, including share blocks held by institutional investors and restricted
shares owned by the company’s officers and insiders.""")
st.write(f"**_Market Capitalization_**: {info['marketCap'].values[0]}")
with st.beta_expander("Market Capitalization"):
st.write("""Market Capitalization is the total dollar value of all of a company's
outstanding shares. It is a measure of corporate size.""")
st.text('Market Capital = Current Market Price * Number Of Shares Outstanding')
st.write(f"**_Price-to-Earnings (P/E) Ratio_**: {info['forwardPE'].values[0]}")
with st.beta_expander("P/E Ratio"):
st.write("""The **price-to-earnings (P/E) ratio** is a metric that helps investors
determine the market value of a stock compared to the company's earnings. The P/E
ratio shows what the market is willing to pay today for a stock based on its past
or future earnings. The P/E ratio is important because it provides a measuring stick
for comparing whether a stock is overvalued or undervalued.""")
st.write("""A **high** P/E ratio could mean that a stock's price is expensive relative to
earnings and **possibly overvalued**. Conversely, a **low** P/E ratio might indicate that
the **current stock price is cheap relative to earnings**.""")
st.text('P/E = Average Common Stock Price / Net Income Per Share')
st.write("""The **Forward P/E** uses forecasted earnings to calculate P/E for the next fiscal
year. If the earnings are expected to grow in the future, the forward P/E will be lower
than the current P/E.""")
st.text('Forward P/E = Current Market Price / Forecasted Earnings Per Share')
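            # Worked example (illustrative, not tied to the data above): a $100 share
            # price with $5 of trailing earnings per share gives P/E = 100 / 5 = 20;
            # if next year's earnings are forecast at $8 per share, the forward P/E
            # is 100 / 8 = 12.5.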
st.write(f"**_Dividend Yield_**: {info['dividendYield'].values[0]}")
with st.beta_expander("Dividend Yield"):
st.write("""The dividend yield, expressed as a percentage, is a financial ratio
(dividend/price) that shows how much a company pays out in dividends each year
relative to its stock price.""")
st.text('Dividend Yield = Annual Dividend Per Share / Price Per Share')
st.write("""New companies that are relatively small, but still growing quickly, may pay a
lower average dividend than mature companies in the same sectors. In general, mature
companies that aren't growing very quickly pay the highest dividend yields.""")
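            # Worked example (illustrative): a $1.50 annual dividend on a $60 share
            # price is a dividend yield of 1.50 / 60 = 2.5%.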
st.write(f"**_Beta_**: {info['beta'].values[0]}")
with st.beta_expander("Beta"):
st.write("""Beta is a measure of the volatility—or systematic risk—of a security or portfolio
compared to the market as a whole. It effectively describes the activity of a security's
returns as it responds to swings in the market.""")
st.write("If a stock has a beta of **1.0**, it indicates that its price activity is strongly correlated with the market.")
st.write("""A beta value that is **less than 1.0** means that the security is theoretically
less volatile than the market. Including this stock in a portfolio makes it less risky
than the same portfolio without the stock.""")
st.write("""A beta that is greater than 1.0 indicates that the security's price is
theoretically more volatile than the market. For example, if a stock's beta is
1.2, it is assumed to be 20% more volatile than the market. Technology stocks
and small cap stocks tend to have higher betas than the market benchmark.""")
st.write("""A negative beta shows that the asset inversely follows the market,
meaning it decreases in value if the market goes up and increases if the market goes down.""")
st.subheader("Calculate Moving Averages")
windowSMA = st.slider("Select Simple Moving Average Period", 5, 200)
#st.write(f"{windowSMA} Simple Moving Average selected")
try:
calculateSMA(df, windowSMA)
except Exception as e:
st.write(f"Failed to calculate {windowSMA}SMA.")
windowEMA = st.slider("Select Exponential Moving Average Period", 5, 200)
#st.write(f"{windowEMA} Exponential Moving Average selected")
try:
calculateEMA(df, windowEMA)
except Exception as e:
st.write(f"Failed to calculate {windowEMA}EMA.")
plot = candlestickChart(df, ticker, sma=windowSMA, ema=windowEMA)
st.plotly_chart(plot)
if st.checkbox("Get Real-time News Articles"):
st.subheader(f'Latest {companyName} News')
df = user.get_db_news(ticker, db)
df = df[['news_date', 'headline', 'sentiment', 'url']]
cm = sns.diverging_palette(20, 145, as_cmap=True)
st.dataframe(df.style.background_gradient(cmap=cm))
mean_scores = df.groupby(['news_date']).mean()
mean_scores = mean_scores.xs('sentiment', axis="columns").transpose()
st.subheader('Sentiment Over Time')
st.line_chart(mean_scores)
elif (functionality == 'Compare Company Fundamentals'):
st.header('Compare Company Fundamentals')
lottie_url = load_lottieurl("https://assets10.lottiefiles.com/private_files/lf30_F3v2Nj.json")
st_lottie(lottie_url, height=300)
choice = st.sidebar.selectbox("Which Companies To Compare?", ('Analyze All Companies','Custom'))
if (choice == 'Analyze All Companies'):
df = user.get_all_fundamentals(db)
df = df.head(100)
st.write(df)
metric = st.selectbox('Select Metric to Visualize', ('Stock Price', 'Market Cap', 'Beta', 'Forward P/E', 'Dividend Yield', 'Average Volume'))
number = st.slider('Number of Companies', 5, 20)
order = st.selectbox('Ascending or Descending Order?', ('Ascending', 'Descending'))
if metric == 'Stock Price':
xaxis = 'previousClose'
elif metric == 'Market Cap':
xaxis = 'marketCap'
elif metric == 'Beta':
xaxis = 'beta'
elif metric == 'Forward P/E':
xaxis = 'forwardPE'
elif metric == 'Dividend Yield':
xaxis = 'dividendYield'
elif metric == 'Average Volume':
xaxis = 'averageVolume'
plot = fundamentalChart(df, metric, xaxis, number, order)
st.plotly_chart(plot)
else:
number = st.sidebar.slider('Select Number of Companies to Compare', 2, 10)
tickers = []
for i in range(1, number+1):
ticker = st.sidebar.text_input(f"Enter ticker symbol {i}:")
tickers.append(ticker)
infos = []
fundamentals = ['sector', 'previousClose', 'beta', 'marketCap', 'averageVolume', 'forwardPE', 'dividendYield', 'sharesOutstanding']
if len(tickers) == number:
for ticker in tickers:
print(f"Downloading data for {ticker}")
infos.append(yf.Ticker(ticker).info)
df = pd.DataFrame(infos)
df = df.set_index('symbol')
df = df[df.columns[df.columns.isin(fundamentals)]]
st.write(df)
elif (functionality == 'Optimize my Portfolio'):
st.header('Optimize my Portfolio')
lottie_url = load_lottieurl("https://assets3.lottiefiles.com/packages/lf20_TWo1Pn.json")
st_lottie(lottie_url, height=300)
index = st.sidebar.selectbox('Select Which Companies to Evaluate',
('Dow Jones Industrial Average (DJIA)', 'S&P500', 'S&P100', 'NASDAQ-100'))
portfolio_val = int(st.sidebar.text_input("Enter Amount to Invest", value=10000))
strategy = st.sidebar.selectbox("Select Allocation Strategy",
('Optimize Return & Risk', 'Minimize Risk', 'Custom Risk', 'Custom Return'))
if (index == 'S&P500'):
st.subheader('S&P 500')
st.write('''The S&P 500, or simply the S&P, is a stock market index that measures the
stock performance of 500 large companies listed on stock exchanges in the United
States. It is one of the most commonly followed equity indices. The S&P 500 index
is a capitalization-weighted index and the 10 largest companies in the index account
for 27.5% of the market capitalization of the index. The 10 largest companies in the
index, in order of weighting, are Apple Inc., Microsoft, Amazon.com, Facebook, Tesla,
Inc., Alphabet Inc. (class A & C), Berkshire Hathaway, Johnson & Johnson, and JPMorgan
Chase & Co., respectively.''')
portfolio = pd.read_csv("S&P500.csv", index_col="Date")
elif (index == 'S&P100'):
st.subheader('S&P 100')
st.write('''The S&P 100 Index is a stock market index of United States stocks maintained
by Standard & Poor's. It is a subset of the S&P 500 and includes 101 (because one of
its component companies has 2 classes of stock) leading U.S. stocks. Constituents of
the S&P 100 are selected for sector balance and represent about 67% of the market
capitalization of the S&P 500 and almost 54% of the market capitalization of the U.S.
equity markets as of December 2020. The stocks in the S&P 100 tend to be the largest
and most established companies in the S&P 500.''')
portfolio = pd.read_csv("SP100index.csv", index_col="Date")
with st.beta_expander("The S&P 100 consists of:"):
tickers = portfolio.columns
for ticker in tickers:
st.write(f"* {getCompanyName(ticker)}")
elif (index == 'NASDAQ-100'):
st.subheader('NASDAQ-100')
st.write('''The NASDAQ-100 is a stock market index made up of 102 equity securities issued
by 100 of the largest non-financial companies listed on the Nasdaq stock market.''')
portfolio = | pd.read_csv("NASDAQ.csv", index_col="Date") | pandas.read_csv |
from __future__ import annotations
from datetime import timedelta
import operator
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Callable,
Hashable,
List,
cast,
)
import warnings
import numpy as np
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._typing import Dtype
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.util._exceptions import rewrite_exception
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_scalar,
is_signed_integer_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCTimedeltaIndex
from pandas.core import ops
import pandas.core.common as com
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
)
from pandas.core.ops.common import unpack_zerodim_and_defer
if TYPE_CHECKING:
from pandas import Index
_empty_range = range(0)
class RangeIndex(NumericIndex):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by DataFrame and Series when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), range, or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
dtype : np.int64
Unused, accepted for homogeneity with other index types.
copy : bool, default False
Unused, accepted for homogeneity with other index types.
name : object, optional
Name to be stored in the index.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base pandas Index type.
Int64Index : Index of int64 data.
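    Examples
    --------
    A few illustrative constructions (outputs shown for the default repr):

    >>> pd.RangeIndex(5)
    RangeIndex(start=0, stop=5, step=1)
    >>> pd.RangeIndex(-2, 4, 2)
    RangeIndex(start=-2, stop=4, step=2)
    >>> pd.RangeIndex.from_range(range(3))
    RangeIndex(start=0, stop=3, step=1)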
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
_can_hold_na = False
_range: range
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
start=None,
stop=None,
step=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
) -> RangeIndex:
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if isinstance(start, RangeIndex):
return start.copy(name=name)
elif isinstance(start, range):
return cls._simple_new(start, name=name)
# validate the arguments
if com.all_none(start, stop, step):
raise TypeError("RangeIndex(...) must be called with integers")
start = ensure_python_int(start) if start is not None else 0
if stop is None:
start, stop = 0, start
else:
stop = ensure_python_int(stop)
step = ensure_python_int(step) if step is not None else 1
if step == 0:
raise ValueError("Step must not be zero")
rng = range(start, stop, step)
return cls._simple_new(rng, name=name)
@classmethod
def from_range(
cls, data: range, name=None, dtype: Dtype | None = None
) -> RangeIndex:
"""
Create RangeIndex from a range object.
Returns
-------
RangeIndex
"""
if not isinstance(data, range):
raise TypeError(
f"{cls.__name__}(...) must be called with object coercible to a "
f"range, {repr(data)} was passed"
)
cls._validate_dtype(dtype)
return cls._simple_new(data, name=name)
@classmethod
def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex:
result = object.__new__(cls)
assert isinstance(values, range)
result._range = values
result._name = name
result._cache = {}
result._reset_identity()
return result
# --------------------------------------------------------------------
@cache_readonly
def _constructor(self) -> type[Int64Index]:
""" return the class to use for construction """
return Int64Index
@cache_readonly
def _data(self) -> np.ndarray:
"""
An int array that for performance reasons is created only when needed.
The constructed array is saved in ``_cache``.
"""
return np.arange(self.start, self.stop, self.step, dtype=np.int64)
@cache_readonly
def _cached_int64index(self) -> Int64Index:
return Int64Index._simple_new(self._data, name=self.name)
@property
def _int64index(self) -> Int64Index:
# wrap _cached_int64index so we can be sure its name matches self.name
res = self._cached_int64index
res._name = self._name
return res
def _get_data_as_items(self):
""" return a list of tuples of start, stop, step """
rng = self._range
return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]
def __reduce__(self):
d = self._get_attributes_dict()
d.update(dict(self._get_data_as_items()))
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Rendering Methods
def _format_attrs(self):
"""
Return a list of tuples of the (attr, formatted_value)
"""
attrs = self._get_data_as_items()
if self.name is not None:
attrs.append(("name", ibase.default_pprint(self.name)))
return attrs
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]:
if not len(self._range):
return header
first_val_str = str(self._range[0])
last_val_str = str(self._range[-1])
max_length = max(len(first_val_str), len(last_val_str))
return header + [f"{x:<{max_length}}" for x in self._range]
# --------------------------------------------------------------------
_deprecation_message = (
"RangeIndex.{} is deprecated and will be "
"removed in a future version. Use RangeIndex.{} "
"instead"
)
@property
def start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
"""
# GH 25710
return self._range.start
@property
def _start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``start`` instead.
"""
warnings.warn(
self._deprecation_message.format("_start", "start"),
FutureWarning,
stacklevel=2,
)
return self.start
@property
def stop(self) -> int:
"""
The value of the `stop` parameter.
"""
return self._range.stop
@property
def _stop(self) -> int:
"""
The value of the `stop` parameter.
.. deprecated:: 0.25.0
Use ``stop`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_stop", "stop"),
FutureWarning,
stacklevel=2,
)
return self.stop
@property
def step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
"""
# GH 25710
return self._range.step
@property
def _step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``step`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_step", "step"),
FutureWarning,
stacklevel=2,
)
return self.step
@cache_readonly
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
rng = self._range
return getsizeof(rng) + sum(
getsizeof(getattr(rng, attr_name))
for attr_name in ["start", "stop", "step"]
)
def memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
@property
def dtype(self) -> np.dtype:
return np.dtype(np.int64)
@property
def is_unique(self) -> bool:
""" return if the index has unique values """
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
return self._range.step > 0 or len(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
return self._range.step < 0 or len(self) <= 1
def __contains__(self, key: Any) -> bool:
hash(key)
try:
key = ensure_python_int(key)
except TypeError:
return False
return key in self._range
@property
def inferred_type(self) -> str:
return "integer"
# --------------------------------------------------------------------
# Indexing Methods
@doc(Int64Index.get_loc)
def get_loc(self, key, method=None, tolerance=None):
if method is None and tolerance is None:
if is_integer(key) or (is_float(key) and key.is_integer()):
new_key = int(key)
try:
return self._range.index(new_key)
except ValueError as err:
raise KeyError(key) from err
raise KeyError(key)
return super().get_loc(key, method=method, tolerance=tolerance)
def _get_indexer(
self,
target: Index,
method: str | None = None,
limit: int | None = None,
tolerance=None,
) -> np.ndarray:
# -> np.ndarray[np.intp]
if com.any_not_none(method, tolerance, limit):
return super()._get_indexer(
target, method=method, tolerance=tolerance, limit=limit
)
if self.step > 0:
start, stop, step = self.start, self.stop, self.step
else:
# GH 28678: work on reversed range for simplicity
reverse = self._range[::-1]
start, stop, step = reverse.start, reverse.stop, reverse.step
if not is_signed_integer_dtype(target):
# checks/conversions/roundings are delegated to general method
return super()._get_indexer(target, method=method, tolerance=tolerance)
target_array = np.asarray(target)
locs = target_array - start
valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
locs[~valid] = -1
locs[valid] = locs[valid] / step
if step != self.step:
# We reversed this range: transform to original locs
locs[valid] = len(self) - 1 - locs[valid]
return ensure_platform_int(locs)
# --------------------------------------------------------------------
def repeat(self, repeats, axis=None) -> Int64Index:
return self._int64index.repeat(repeats, axis=axis)
def delete(self, loc) -> Int64Index: # type: ignore[override]
return self._int64index.delete(loc)
def take(
self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs
) -> Int64Index:
with rewrite_exception("Int64Index", type(self).__name__):
return self._int64index.take(
indices,
axis=axis,
allow_fill=allow_fill,
fill_value=fill_value,
**kwargs,
)
def tolist(self) -> list[int]:
return list(self._range)
@doc(Int64Index.__iter__)
def __iter__(self):
yield from self._range
@doc(Int64Index._shallow_copy)
def _shallow_copy(self, values, name: Hashable = no_default):
name = self.name if name is no_default else name
if values.dtype.kind == "f":
return Float64Index(values, name=name)
return Int64Index._simple_new(values, name=name)
def _view(self: RangeIndex) -> RangeIndex:
result = type(self)._simple_new(self._range, name=self._name)
result._cache = self._cache
return result
@doc(Int64Index.copy)
def copy(
self,
name: Hashable = None,
deep: bool = False,
dtype: Dtype | None = None,
names=None,
):
name = self._validate_names(name=name, names=names, deep=deep)[0]
new_index = self._rename(name=name)
if dtype:
warnings.warn(
"parameter dtype is deprecated and will be removed in a future "
"version. Use the astype method instead.",
FutureWarning,
stacklevel=2,
)
new_index = new_index.astype(dtype)
return new_index
def _minmax(self, meth: str):
no_steps = len(self) - 1
if no_steps == -1:
return np.nan
elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0):
return self.start
return self.start + self.step * no_steps
def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return self._minmax("min")
def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The maximum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
| nv.validate_max(args, kwargs) | pandas.compat.numpy.function.validate_max |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sys import argv
#import indicators
#TODO: create module for generating stock signal
# Make put/append/to_hdf store in the table format by default
pd.set_option('io.hdf.default_format','table')
# define hdf store
stockStore = | pd.HDFStore('../../data/store.h5',complevel=9,complib='blosc') | pandas.HDFStore |
import numpy as np
import pandas as pd
# NOTE NEW VECTORISED MASKING METHOD FOR MONTH AND SEASON COLS
# Much more efficient, but small inconsistency with previous method (at bottom for ref)
def years():
return [i for i in np.arange(1987.0, 2009.0, 1)]
def months():
return | pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from contextlib import contextmanager
from io import StringIO
import sys
sys.path.append("../")
import rfr_model # noqa: E402
import pandas as pd # noqa: E402
import numpy as np # noqa: E402
import pandas.api.types as ptypes # noqa: E402
from sklearn.ensemble import RandomForestRegressor # noqa: E402
from collections import defaultdict # noqa: E402
df_test = pd.read_csv('./unittest_dummy.csv', nrows=5)
X_test, y_test = rfr_model.descriptors_outputs(df_test, d_start=5, o=0)
def test_stratify_df():
    '''
    Check that stratify_df returns a single-column numpy array for the dummy frame.
    '''
b_test = rfr_model.stratify_df(df_test, label_type=1, label_site=4)
assert b_test.shape[1] == 1, \
'array shape is incorrect. should be ({}, 1), got ({}, {})'\
.format(b_test.shape[0], b_test.shape[0], b_test.shape[1])
assert isinstance(b_test, np.ndarray), \
'output type is incorrect, should be of type np array.'
def test_descriptors_outputs():
    '''
    Check that descriptors_outputs splits the dummy frame into descriptor and output
    arrays with the expected shapes and numeric dtypes.
    '''
X_test, y_test = rfr_model.descriptors_outputs(df_test, d_start=5, o=0)
assert X_test.shape[1] == 5, \
        'array shape is incorrect. should be ({}, 5), got ({}, {})'\
.format(X_test.shape[0], X_test.shape[0], X_test.shape[1])
assert all(ptypes.is_numeric_dtype(X_test[col]) for col in
list(X_test[X_test.columns[:]])), \
'data type in columns is of incorrect type, must be numeric'
assert | ptypes.is_numeric_dtype(y_test) | pandas.api.types.is_numeric_dtype |
import os
import pickle
import argparse
import pandas as pd
from tqdm import tqdm
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', '-l', type=str, required=True)
parser.add_argument('--output', '-o', type=str, default='results.pkl')
parser.add_argument('--eval_files', '-f', nargs='+', type=str,
default=['test_accuracy.txt', 'best_val_result.txt'],
help='list of evaluation files to read and add to the df')
parser.add_argument('--eval_names', '-n', nargs='+', type=str,
default=['test_accuracy', 'val_accuracy'],
help='names to give to results in evaluation files')
args = parser.parse_args()
print(args)
assert len(args.eval_files) == len(args.eval_names)
logs = os.listdir(args.log_dir)
df = | pd.DataFrame() | pandas.DataFrame |
# Initial imports
import freelancer_sdk_v2 as api
import os
import json
import pandas as pd
import time
from datetime import datetime
import pymysql
from sqlalchemy import create_engine
from db_config import db_config
from collections import deque
from freelancer_sdk_v2.session import Session
from freelancer_sdk_v2.resources.projects.projects import get_reviews
# Create session
token = ''
session = Session(oauth_token=token)
# Get all reviews for a project
def get_review_data(project_id):
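    # Fetch all reviews attached to a single project id via the SDK; returns None
    # when the lookup fails (see the except clause below).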
url = os.environ.get('FLN_URL')
oauth_token = os.environ.get('FLN_OAUTH_TOKEN')
session = Session(oauth_token=token, url=url)
try:
review_data = get_reviews(session, project_id)
return review_data
    except Exception:
        # The original code caught ReviewNotFoundException, which is never imported
        # in this script; catching broadly keeps the scraper runnable when a
        # project's reviews cannot be fetched.
        return None
# Initialize local SQL connection
cnx = create_engine('mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8mb4'.format(db_config['user'],
db_config['pass'],
db_config['host'],
db_config['port'],
db_config['db']),
echo=False)
# Import 100 project IDs that still have to be parsed
# projects_result = cnx.execute("SELECT project_id FROM successful_bids WHERE project_id NOT IN (SELECT * FROM processed_projects_reviews) LIMIT 100")
projects_result = cnx.execute("SELECT project_id FROM successful_bids LIMIT 50")
# Initialize empty list
list_of_projects = []
# Iterate through output of SQL query, and add IDs to list
for row in projects_result:
list_of_projects.append(row['project_id'])
# Create queue of projects from list of IDs
project_queue = deque(list_of_projects)
# Initialize empty dataframe
reviewdf = pd.DataFrame()
# Parse projects until queue is empty
while project_queue:
project_id = project_queue.pop()
review_data = get_review_data(project_id)
# Check that API call didn't fail
if review_data is not None:
# Check that at least one review was found
if len(review_data['reviews']) > 0:
# Iterate through reviews in response and add them to DF
for review in review_data['reviews']:
tempdf = pd.io.json.json_normalize(review)
tempdf.columns = tempdf.columns.map(lambda x: x.split(".")[-1])
tempdf = tempdf.loc[:,~tempdf.columns.duplicated()]
# Convert the DF to str for successful storage in DB
tempdf = tempdf.astype(str)
# Append tempdf to main DF
reviewdf = reviewdf.append(tempdf)
# If project data wasn't found, do nothing
else:
pass
time.sleep(2)
# Store projects that were processed in dedicated table
| pd.Series(list_of_projects) | pandas.Series |
import os
import pandas as pd
import imageio
import numpy as np
from PIL import ImageFont, ImageDraw, Image
import glob
import unicodedata
import re
import enum
FONT_DIR = 'data/chinese/ttf'
DEFAULT_INDEX = 'data/chinese/rendered_chinese_characters.csv'
DEFAULT_VARIANT_LOOKUP_FILE = 'data/chinese/Unihan_Variants.txt'
DEFAULT_VARIANT_IMAGE_FILE = 'data/chinese/rendered_variants.csv'
RENDERED_IMAGE_DIR = 'data/chinese/rendered'
@enum.unique
class CharacterClass(enum.Enum):
FAKE = 0
TRADITIONAL = 1
    SIMPLIFIED = 2
    # Fallback returned by classify_character_from_filename below; the value is arbitrary
    UNKNOWN = 3
def classify_character_from_filename(fname):
fname_lower = fname.lower()
if 'traditional' in fname_lower:
return CharacterClass.TRADITIONAL
elif 'simplified' in fname_lower:
return CharacterClass.SIMPLIFIED
return CharacterClass.UNKNOWN
def build_index(df, fonts, out_dir):
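    # Build one record per (font, character) image found on disk, labelling each
    # entry as traditional or simplified and attaching an integer font code.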
files = []
for font in fonts:
font_name = os.path.basename(os.path.splitext(font)[0])
for idx, row in df.iterrows():
trad_char = row['traditional_char']
simp_char = row['simplified_char']
trad_file = os.path.normpath(os.path.join(out_dir, font_name, trad_char + '.png'))
simp_file = os.path.normpath(os.path.join(out_dir, font_name, simp_char + '.png'))
is_both = trad_char == simp_char
if os.path.exists(trad_file):
files.append({
'font': font_name,
#'class': CharacterClass.BOTH if is_both else CharacterClass.TRADITIONAL,
'class_name': CharacterClass.TRADITIONAL.name,
'class_code': CharacterClass.TRADITIONAL.value,
'character': trad_char,
'file': trad_file
})
if (not is_both) and os.path.exists(simp_file):
files.append({
'font': font_name,
'class_name': CharacterClass.SIMPLIFIED.name,
'class_code': CharacterClass.SIMPLIFIED.value,
'character': simp_char,
'file': simp_file
})
df = pd.DataFrame.from_records(files).drop_duplicates(ignore_index=True)
lut = font_lut(df['font'])
df['font_code'] = df['font'].apply(lambda x: lut[x])
return df
def string_code(s):
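    # Keep the first whitespace-separated token and strip any '<...' source
    # annotation, e.g. 'U+4E00<kSomeSource' -> 'U+4E00' (the source tag is illustrative)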
return re.sub('<.*', '', s.split()[0])
def decode_string(s):
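    # Convert a 'U+XXXX' code point string into the corresponding unicode character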
return s.replace('U+','\\u').encode('latin1').decode('unicode-escape')
def valid_range(val):
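    # True when the code point lies in the CJK Unified Ideographs block
    # (U+4E00-U+9FFF), expressed here as four contiguous sub-ranges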
vint = int(val[2:],16)
valid_ranges = [0x4E00, 0x62FF], [0x6300, 0x77FF], [0x7800, 0x8CFF], [0x8D00, 0x9FFF]
return any( (vint >= vr[0]) & (vint <= vr[1]) for vr in valid_ranges )
def font_lut(series):
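    # Map font names to integer codes, reserving code 0 for the synthetic "FAKE" class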
font_names = series.unique()
font_names = ["FAKE"] + sorted(font_names)
return dict(zip(font_names, range(len(font_names))))
def load_variant_lookup(path=DEFAULT_VARIANT_LOOKUP_FILE):
df = | pd.read_csv(path, comment='#', sep='\t', header=None, names=['from_code','type','to_code']) | pandas.read_csv |
import json
import os
import numpy as np
import pandas as pd
if __name__ == '__main__':
# Set initial random seed and get DATA variable from environment
seed_val = 3
DATA_DIR = os.environ['DATA']
np.random.seed(seed_val)
# Get list of all train data files
train_data_dir = os.path.join(DATA_DIR, 'train')
all_train_files = os.listdir(train_data_dir)
np.random.shuffle(all_train_files)
# Get n_samples per bucket
n_samples = len(all_train_files)
n_buckets = 10
n_samples_per_buc = int(n_samples / n_buckets)
# Build dictionary bucket_samples_map
bucket_samples_map = {}
for bucket_id in range(n_buckets):
st_ind = bucket_id * n_samples_per_buc
en_ind = (bucket_id + 1) * n_samples_per_buc
samples_in_curr_buc = []
for sample_ind in range(st_ind, en_ind):
sample_name = all_train_files[sample_ind]
sample_path = os.path.join(train_data_dir, sample_name)
with open(sample_path) as f:
js = json.loads(f.read())
f.close()
article = js['article']
if article is None or len(article) == 0:
print('File {} has no article in it'.format(sample_ind))
del js
continue
del js
samples_in_curr_buc.append(sample_name)
print ('No of samples in bucket: {} = {}'.format(bucket_id, len(samples_in_curr_buc)))
bucket_samples_map[bucket_id] = samples_in_curr_buc
    # Check that no sample appears in more than one bucket
for buc_i in range(n_buckets):
buc_i_samples = set(bucket_samples_map[buc_i])
for buc_j in range(buc_i+1, n_buckets):
buc_j_samples = set(bucket_samples_map[buc_j])
intersection_samples = buc_i_samples.intersection(buc_j_samples)
if len(intersection_samples) > 0:
print ('Error: one or more bucket has overlap')
    # Save one csv file of sample names per bucket (cross_rev_bucket_<id>.csv)
for bucket_id in range(n_buckets):
bucket_file_path = os.path.join(DATA_DIR, 'cross_rev_bucket_{}.csv'.format(str(bucket_id)))
df = | pd.DataFrame() | pandas.DataFrame |
"""Base class for modeling portfolio and measuring its performance.
The job of the `Portfolio` class is to create a series of positions allocated
against a cash component, produce an equity curve, incorporate basic transaction costs
and produce a set of statistics about its performance. In particular it outputs
position/profit metrics and drawdown information.
## Workflow
The workflow of `Portfolio` is simple:
1. Receives a set of inputs, such as entry and exit signals
2. Uses them to generate and fill orders in form of records (simulation part)
3. Calculates a broad range of risk & performance metrics based on these records (analysis part)
It basically builds upon the `vectorbt.portfolio.orders.Orders` class. To simplify creation of order
records and keep track of balances, it exposes several convenience methods with prefix `from_`.
For example, you can use `Portfolio.from_signals` method to generate orders from entry and exit signals.
Alternatively, you can use `Portfolio.from_order_func` to run a custom order function on each tick.
The results are then automatically passed to the constructor method of `Portfolio` and you will
receive a portfolio instance ready to be used for performance analysis.
This way, one can simulate and analyze his/her strategy in a couple of lines.
### Example
The following example does something crazy: it checks candlestick data of 6 major cryptocurrencies
in 2020 against every single pattern found in TA-Lib, and translates them into signals:
```python-repl
>>> import numpy as np
>>> import pandas as pd
>>> from datetime import datetime
>>> import talib
>>> import vectorbt as vbt
>>> # Fetch price history
>>> symbols = ['BTC-USD', 'ETH-USD', 'XRP-USD', 'BNB-USD', 'BCH-USD', 'LTC-USD']
>>> start = datetime(2020, 1, 1)
>>> end = datetime(2020, 9, 1)
>>> ohlcv_by_symbol = vbt.utils.data.download(symbols, start=start, end=end)
>>> # Put assets into a single dataframe by price type
>>> ohlcv = vbt.utils.data.concat_symbols(ohlcv_by_symbol)
>>> ohlcv['Open'].head()
symbol BTC-USD ETH-USD XRP-USD BNB-USD BCH-USD \
Date
2019-12-31 7294.438965 132.612274 0.194518 13.952087 209.301987
2020-01-01 7194.892090 129.630661 0.192912 13.730962 204.671295
2020-01-02 7202.551270 130.820038 0.192708 13.698126 204.354538
2020-01-03 6984.428711 127.411263 0.187948 13.035329 196.007690
2020-01-04 7345.375488 134.168518 0.193521 13.667442 222.536560
symbol LTC-USD
Date
2019-12-31 42.766113
2020-01-01 41.326534
2020-01-02 42.018085
2020-01-03 39.863129
2020-01-04 42.383526
>>> # Run every single pattern recognition indicator and combine results
>>> result = pd.DataFrame.vbt.empty_like(ohlcv['Open'], fill_value=0.)
>>> for pattern in talib.get_function_groups()['Pattern Recognition']:
... PRecognizer = vbt.IndicatorFactory.from_talib(pattern)
... pr = PRecognizer.run(ohlcv['Open'], ohlcv['High'], ohlcv['Low'], ohlcv['Close'])
... result = result + pr.integer
>>> # Don't look into future
>>> result = result.vbt.fshift(1)
>>> # Treat each number as order value in USD
>>> size = result / ohlcv['Open']
>>> # Simulate portfolio
>>> portfolio = vbt.Portfolio.from_orders(
... ohlcv['Close'], size, price=ohlcv['Open'],
... init_cash='autoalign', fees=0.001, slippage=0.001)
>>> # Visualize portfolio value
>>> portfolio.value().vbt.plot()
```

## Broadcasting
`Portfolio` is very flexible towards inputs:
* Accepts both Series and DataFrames as inputs
* Broadcasts inputs to the same shape using vectorbt's own broadcasting rules
* Many inputs (such as `fees`) can be passed as a single value, value per column/row, or as a matrix (see the sketch below)
* Implements flexible indexing wherever possible to save memory
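
For example (a rough sketch, not part of the original docs), a scalar `size` and
per-column `fees` both broadcast against the shape of `close`:

```python-repl
>>> close = pd.DataFrame({'a': [1., 2., 3.], 'b': [4., 5., 6.]})
>>> # one fee per column: 'a' trades for free, 'b' pays 1%
>>> portfolio = vbt.Portfolio.from_orders(close, 10., fees=[[0.0, 0.01]])
```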
## Grouping
One of the key features of `Portfolio` is the ability to group columns. Groups can be specified by
`group_by`, which can be anything from positions or names of column levels, to a NumPy array with
actual groups. Groups can be formed to share capital between columns or to compute metrics
for a combined portfolio of multiple independent columns.
For example, let's divide our portfolio into two groups sharing the same cash:
```python-repl
>>> # Simulate combined portfolio
>>> group_by = pd.Index([
... 'first', 'first', 'first',
... 'second', 'second', 'second'
... ], name='group')
>>> comb_portfolio = vbt.Portfolio.from_orders(
... ohlcv['Close'], size, price=ohlcv['Open'],
... init_cash='autoalign', fees=0.001, slippage=0.001,
... group_by=group_by, cash_sharing=True)
>>> # Get total profit per group
>>> comb_portfolio.total_profit()
group
first 21891.431061
second 7575.676246
dtype: float64
```
Not only can you analyze each group, but also each column in the group:
```python-repl
>>> # Get total profit per column
>>> comb_portfolio.total_profit(group_by=False)
symbol
BTC-USD 5163.844396
ETH-USD 13368.521326
XRP-USD 3359.065339
BNB-USD 4724.565229
BCH-USD -259.592709
LTC-USD 3110.703726
dtype: float64
```
In the same way, you can introduce new grouping to the method itself:
```python-repl
>>> # Get total profit per group
>>> portfolio.total_profit(group_by=group_by)
group
first 21891.431061
second 7575.676246
dtype: float64
```
!!! note
If cash sharing is enabled, grouping can be disabled but cannot be modified.
## Indexing
In addition, you can use pandas indexing on the `Portfolio` class itself, which forwards
indexing operation to each argument with index:
```python-repl
>>> portfolio['BTC-USD']
<vectorbt.portfolio.base.Portfolio at 0x7fac7517ac88>
>>> portfolio['BTC-USD'].total_profit()
5163.844396244112
```
Combined portfolio is indexed by group:
```python-repl
>>> comb_portfolio['first']
<vectorbt.portfolio.base.Portfolio at 0x7fac5756b828>
>>> comb_portfolio['first'].total_profit()
21891.43106080097
```
!!! note
Changing index (time axis) is not supported. The object should be treated as a Series
rather than a DataFrame; for example, use `portfolio.iloc[0]` instead of `portfolio.iloc[:, 0]`.
Indexing behavior depends solely upon `vectorbt.base.array_wrapper.ArrayWrapper`.
For example, if `group_select` is enabled indexing will be performed on groups,
otherwise on single columns. You can pass wrapper arguments with `wrapper_kwargs`.
## Logging
To collect more information on how a specific order was processed or to be able to track the whole
simulation from the beginning to the end, you can turn on logging.
```python-repl
>>> # Simulate portfolio with logging
>>> portfolio = vbt.Portfolio.from_orders(
... ohlcv['Close'], size, price=ohlcv['Open'],
... init_cash='autoalign', fees=0.001, slippage=0.001, log=True)
>>> portfolio.logs.records
id idx col group cash_now shares_now val_price_now value_now \
0 0 0 0 0 inf 0.000000 7294.438965 inf
... ... ... ... ... ... ... ... ...
1463 1463 243 5 5 inf 271.629075 62.844059 inf
size size_type ... log new_cash new_shares res_size \
0 NaN 0 ... True inf 0.000000 NaN
... ... ... ... ... ... ... ...
1463 7.956202 0 ... True inf 279.585277 7.956202
res_price res_fees res_side res_status res_status_info order_id
0 NaN NaN -1 1 0 -1
... ... ... ... ... ... ...
1463 62.906903 0.5005 0 0 -1 1075
[1464 rows x 30 columns]
```
Just as orders, logs are also records and thus can be easily analyzed:
```python-repl
>>> from vectorbt.portfolio.enums import OrderStatus
>>> portfolio.logs.map_field('res_status', value_map=OrderStatus).value_counts()
symbol BTC-USD ETH-USD XRP-USD BNB-USD BCH-USD LTC-USD
Ignored 59 72 66 66 66 59
Filled 185 172 178 178 178 185
```
Logging can also be turned on just for one order, row, or column, since as many other
variables it's specified per order and can broadcast automatically.
!!! note
Logging can slow down simulation.
## Caching
`Portfolio` heavily relies upon caching. If a method or a property requires heavy computation,
it's wrapped with `vectorbt.utils.decorators.cached_method` and `vectorbt.utils.decorators.cached_property`
respectively. Caching can be disabled globally via `vectorbt.settings`.
!!! note
Because of caching, class is meant to be immutable and all properties are read-only.
To change any attribute, use the `copy` method and pass the attribute as keyword argument.
If you're running out of memory when working with large arrays, make sure to disable caching
and then store most important time series manually. For example, if you're interested in Sharpe
ratio or other metrics based on returns, run and save `Portfolio.returns` and then use the
`vectorbt.returns.accessors.ReturnsAccessor` to analyze them. Do not use methods akin to
`Portfolio.sharpe_ratio` because they will re-calculate returns each time.
Alternatively, you can precisely point at attributes and methods that should or shouldn't
be cached. For example, you can blacklist the entire `Portfolio` class except a few most called
methods such as `Portfolio.cash_flow` and `Portfolio.share_flow`:
```python-repl
>>> vbt.settings.caching['blacklist'].append('Portfolio')
>>> vbt.settings.caching['whitelist'].extend([
... 'Portfolio.cash_flow',
... 'Portfolio.share_flow'
... ])
```
Define rules for one instance of `Portfolio`:
```python-repl
>>> vbt.settings.caching['blacklist'].append(portfolio)
>>> vbt.settings.caching['whitelist'].extend([
... portfolio.cash_flow,
... portfolio.share_flow
... ])
```
!!! note
Note that the above approach doesn't work for cached properties.
Use tuples of the instance and the property name instead, such as `(portfolio, 'orders')`.
To reset caching:
```python-repl
>>> vbt.settings.caching.reset()
```
"""
import numpy as np
import pandas as pd
from inspect import signature
from collections import OrderedDict
import warnings
from vectorbt.utils import checks
from vectorbt.utils.decorators import cached_property, cached_method
from vectorbt.utils.enum import convert_str_enum_value
from vectorbt.utils.config import merge_dicts
from vectorbt.utils.random import set_seed
from vectorbt.utils.colors import adjust_opacity
from vectorbt.utils.widgets import make_subplots
from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast, broadcast_to
from vectorbt.base.array_wrapper import ArrayWrapper, Wrapping
from vectorbt.generic import nb as generic_nb
from vectorbt.generic.drawdowns import Drawdowns
from vectorbt.signals.generators import RAND, RPROB
from vectorbt.portfolio import nb
from vectorbt.portfolio.orders import Orders
from vectorbt.portfolio.trades import Trades, Positions
from vectorbt.portfolio.logs import Logs
from vectorbt.portfolio.enums import (
InitCashMode,
CallSeqType,
SizeType,
ConflictMode,
Direction
)
def _mean_agg_func(df):
"""Compute mean for `Portfolio.stats`."""
return df.mean(axis=0)
def add_returns_methods(func_names):
"""Class decorator to add `vectorbt.returns.accessors.ReturnsAccessor` methods to `Portfolio`."""
def wrapper(cls):
for func_name in func_names:
if isinstance(func_name, tuple):
ret_func_name = func_name[0]
else:
ret_func_name = func_name
def returns_method(
self,
*args,
group_by=None,
year_freq=None,
ret_func_name=ret_func_name,
active_returns=False,
in_sim_order=False,
reuse_returns=None,
**kwargs):
if reuse_returns is not None:
returns = reuse_returns
else:
if active_returns:
returns = self.active_returns(group_by=group_by)
else:
returns = self.returns(group_by=group_by, in_sim_order=in_sim_order)
returns_acc = returns.vbt.returns(freq=self.wrapper.freq, year_freq=year_freq)
# Select only those arguments in kwargs that are also in the method's signature
# This is done for Portfolio.stats which passes the same kwargs to multiple methods
method = getattr(returns_acc, ret_func_name)
sig = signature(method)
arg_names = [p.name for p in sig.parameters.values() if p.kind == p.POSITIONAL_OR_KEYWORD]
new_kwargs = {}
for arg_name in arg_names:
if arg_name in kwargs:
new_kwargs[arg_name] = kwargs[arg_name]
return method(*args, **new_kwargs)
if isinstance(func_name, tuple):
func_name = func_name[1]
returns_method.__name__ = func_name
returns_method.__qualname__ = f"Portfolio.{func_name}"
returns_method.__doc__ = f"See `vectorbt.returns.accessors.ReturnsAccessor.{ret_func_name}`."
setattr(cls, func_name, cached_method(returns_method))
return cls
return wrapper
@add_returns_methods([
('daily', 'daily_returns'),
('annual', 'annual_returns'),
('cumulative', 'cumulative_returns'),
('annualized', 'annualized_return'),
'annualized_volatility',
'calmar_ratio',
'omega_ratio',
'sharpe_ratio',
'deflated_sharpe_ratio',
'downside_risk',
'sortino_ratio',
'information_ratio',
'beta',
'alpha',
'tail_ratio',
'value_at_risk',
'conditional_value_at_risk',
'capture',
'up_capture',
'down_capture',
'drawdown',
'max_drawdown'
])
class Portfolio(Wrapping):
"""Class for modeling portfolio and measuring its performance.
Args:
wrapper (ArrayWrapper): Array wrapper.
See `vectorbt.base.array_wrapper.ArrayWrapper`.
close (array_like): Reference price, such as close.
order_records (array_like): A structured NumPy array of order records.
log_records (array_like): A structured NumPy array of log records.
init_cash (InitCashMode, float or array_like of float): Initial capital.
cash_sharing (bool): Whether to share cash within the same group.
call_seq (array_like of int): Sequence of calls per row and group.
incl_unrealized (bool): Whether to include unrealized P&L in statistics.
use_filled_close (bool): Whether to forward-backward fill NaN values in `close`.
Doesn't affect simulation and only used for total profit and market value.
See `Portfolio.fill_close`.
!!! note
Use class methods with `from_` prefix to build a portfolio.
The `__init__` method is reserved for indexing purposes.
!!! note
This class is meant to be immutable. To change any attribute, use `Portfolio.copy`."""
def __init__(self, wrapper, close, order_records, log_records, init_cash,
cash_sharing, call_seq, incl_unrealized=None, use_filled_close=None):
Wrapping.__init__(
self,
wrapper,
close=close,
order_records=order_records,
log_records=log_records,
init_cash=init_cash,
cash_sharing=cash_sharing,
call_seq=call_seq,
incl_unrealized=incl_unrealized,
use_filled_close=use_filled_close
)
# Get defaults
from vectorbt import settings
if incl_unrealized is None:
incl_unrealized = settings.portfolio['incl_unrealized']
if use_filled_close is None:
use_filled_close = settings.portfolio['use_filled_close']
# Store passed arguments
self._close = broadcast_to(close, wrapper.dummy(group_by=False))
self._order_records = order_records
self._log_records = log_records
self._init_cash = init_cash
self._cash_sharing = cash_sharing
self._call_seq = call_seq
self._incl_unrealized = incl_unrealized
self._use_filled_close = use_filled_close
def _indexing_func(self, pd_indexing_func):
"""Perform indexing on `Portfolio`."""
new_wrapper, _, group_idxs, col_idxs = \
self.wrapper._indexing_func_meta(pd_indexing_func, column_only_select=True)
new_close = new_wrapper.wrap(to_2d(self.close, raw=True)[:, col_idxs], group_by=False)
new_order_records = self.orders._col_idxs_records(col_idxs)
new_log_records = self.logs._col_idxs_records(col_idxs)
if isinstance(self._init_cash, int):
new_init_cash = self._init_cash
else:
new_init_cash = to_1d(self._init_cash, raw=True)[group_idxs if self.cash_sharing else col_idxs]
new_call_seq = self.call_seq.values[:, col_idxs]
return self.copy(
wrapper=new_wrapper,
close=new_close,
order_records=new_order_records,
log_records=new_log_records,
init_cash=new_init_cash,
call_seq=new_call_seq
)
# ############# Class methods ############# #
@classmethod
def from_holding(cls, close, **kwargs):
"""Simulate portfolio from holding.
Based on `Portfolio.from_signals`."""
return cls.from_signals(close, True, False, accumulate=False, **kwargs)
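# Illustrative usage sketch (not part of the original source; output depends on
# the defaults in `vectorbt.settings.portfolio`, e.g. an initial cash of 100):
#
# >>> import pandas as pd
# >>> import vectorbt as vbt
# >>> close = pd.Series([1, 2, 3, 4, 5])
# >>> vbt.Portfolio.from_holding(close).share_flow()  # single entry at the first tick, held afterwards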
@classmethod
def from_random(cls, close, n=None, prob=None, entry_prob=None, exit_prob=None,
param_product=False, seed=None, **kwargs):
"""Simulate portfolio from random entry and exit signals.
Generates signals based either on the number of signals `n` or the probability
of encountering a signal `prob`.
If `n` is set, see `vectorbt.signals.generators.RAND`.
If `prob` is set, see `vectorbt.signals.generators.RPROB`.
Based on `Portfolio.from_signals`."""
from vectorbt import settings
if entry_prob is None:
entry_prob = prob
if exit_prob is None:
exit_prob = prob
if seed is None:
seed = settings.portfolio['seed']
if n is not None and (entry_prob is not None or exit_prob is not None):
raise ValueError("Either n or entry_prob and exit_prob should be set")
if n is not None:
rand = RAND.run(
n=n,
input_shape=close.shape,
input_index=close.vbt.wrapper.index,
input_columns=close.vbt.wrapper.columns,
seed=seed
)
entries = rand.entries
exits = rand.exits
elif entry_prob is not None and exit_prob is not None:
rprob = RPROB.run(
entry_prob=entry_prob,
exit_prob=exit_prob,
param_product=param_product,
input_shape=close.shape,
input_index=close.vbt.wrapper.index,
input_columns=close.vbt.wrapper.columns,
seed=seed
)
entries = rprob.entries
exits = rprob.exits
else:
raise ValueError("At least n or entry_prob and exit_prob should be set")
return cls.from_signals(close, entries, exits, seed=seed, **kwargs)
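# Illustrative usage sketch (not part of the original source): either a fixed
# number of random entry/exit pairs or signal probabilities can be supplied.
#
# >>> import pandas as pd
# >>> import vectorbt as vbt
# >>> close = pd.Series([1, 2, 3, 4, 5])
# >>> vbt.Portfolio.from_random(close, n=1, seed=42)                              # one entry/exit pair
# >>> vbt.Portfolio.from_random(close, entry_prob=0.5, exit_prob=0.5, seed=42)    # probabilistic signals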
@classmethod
def from_signals(cls, close, entries, exits, size=None, size_type=None, direction=None, price=None,
fees=None, fixed_fees=None, slippage=None, min_size=None, max_size=None,
reject_prob=None, allow_partial=None, raise_reject=None, accumulate=None, log=None,
conflict_mode=None, close_first=None, val_price=None, init_cash=None, cash_sharing=None,
call_seq=None, max_orders=None, max_logs=None, seed=None, group_by=None,
broadcast_kwargs=None, wrapper_kwargs=None, freq=None, **kwargs):
"""Simulate portfolio from entry and exit signals.
Starting with initial cash `init_cash`, for each signal in `entries`, enters a long/short position
by buying/selling `size` of shares. For each signal in `exits`, closes the position by
selling/buying shares. Depending upon accumulation, each entry signal may increase
the position and each exit signal may decrease the position. When both entry and exit signals
are present, ignores them by default. When grouping is enabled with `group_by`, will compute
the performance of the entire group. When `cash_sharing` is enabled, will share the cash among
all columns in the group.
Args:
close (array_like): Reference price, such as close.
Will broadcast.
Will be used for calculating unrealized P&L and portfolio value.
entries (array_like of bool): Boolean array of entry signals.
Will broadcast.
Becomes a long signal if `direction` is `all` or `longonly`, otherwise short.
exits (array_like of bool): Boolean array of exit signals.
Will broadcast.
Becomes a short signal if `direction` is `all` or `longonly`, otherwise long.
size (float or array_like): Size to order.
Will broadcast.
* Set to any number to buy/sell some fixed amount of shares.
Longs are limited by cash in the account, while shorts are unlimited.
* Set to `np.inf` to buy shares for all cash, or `-np.inf` to sell shares for
initial margin of 100%. If `direction` is not `all`, `-np.inf` will close the position.
* Set to `np.nan` or 0 to skip.
!!! note
Sign will be ignored.
size_type (SizeType or array_like): See `vectorbt.portfolio.enums.SizeType`.
Will broadcast.
Only `SizeType.Shares` and `SizeType.Percent` are supported.
Other modes such as target percentage are not compatible with signals since
their logic may contradict the direction of the signal.
!!! note
`SizeType.Percent` does not support position reversal. Switch to a single
direction or use `close_first`.
See warning on `size_type` in `Portfolio.from_orders`.
direction (Direction or array_like): See `vectorbt.portfolio.enums.Direction`.
Will broadcast.
price (array_like of float): Order price.
Defaults to `close`. Will broadcast.
fees (float or array_like): Fees in percentage of the order value.
Will broadcast. Note that 0.01 = 1%.
fixed_fees (float or array_like): Fixed amount of fees to pay per order.
Will broadcast.
slippage (float or array_like): Slippage in percentage of price.
Will broadcast. Note that 0.01 = 1%.
min_size (float or array_like): Minimum size for an order to be accepted.
Will broadcast.
max_size (float or array_like): Maximum size for an order.
Will broadcast.
Will be partially filled if exceeded. You might not be able to properly close
the position if accumulation is enabled and `max_size` is too low.
reject_prob (float or array_like): Order rejection probability.
Will broadcast.
allow_partial (bool or array_like): Whether to allow partial fills.
Will broadcast.
Does not apply when size is `np.inf`.
raise_reject (bool or array_like): Whether to raise an exception if order gets rejected.
Will broadcast.
log (bool or array_like): Whether to log orders.
Will broadcast.
accumulate (bool or array_like): Whether to accumulate signals.
Will broadcast.
Behaves similarly to `Portfolio.from_orders`.
conflict_mode (ConflictMode or array_like): See `vectorbt.portfolio.enums.ConflictMode`.
Will broadcast.
close_first (bool or array_like): Whether to close the position first before reversal.
Will broadcast.
Otherwise reverses the position with a single order and within the same tick.
Takes effect only under `Direction.All`. Requires a second signal to enter
the opposite position, which allows defining parameters such as `fixed_fees`
separately for long and short positions.
val_price (array_like of float): Asset valuation price.
Defaults to `price` if set, otherwise to previous `close`.
See `val_price` in `Portfolio.from_orders`.
init_cash (InitCashMode, float or array_like of float): Initial capital.
See `init_cash` in `Portfolio.from_order_func`.
cash_sharing (bool): Whether to share cash within the same group.
See `cash_sharing` in `Portfolio.from_orders`.
call_seq (CallSeqType or array_like of int): Default sequence of calls per row and group.
See `call_seq` in `Portfolio.from_orders`.
max_orders (int): Size of the order records array.
Defaults to the number of elements in the broadcasted shape.
Set to a lower number if you run out of memory.
max_logs (int): Size of the log records array.
Defaults to the number of elements in the broadcasted shape if any element of `log` is True,
otherwise to 1.
Set to a lower number if you run out of memory.
seed (int): Seed to be set for both `call_seq` and at the beginning of the simulation.
group_by (any): Group columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
broadcast_kwargs (dict): Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
wrapper_kwargs (dict): Keyword arguments passed to `vectorbt.base.array_wrapper.ArrayWrapper`.
freq (any): Index frequency in case `close.index` is not datetime-like.
**kwargs: Keyword arguments passed to the `__init__` method.
All broadcastable arguments will be broadcast using `vectorbt.base.reshape_fns.broadcast`
but keep original shape to utilize flexible indexing and to save memory.
For defaults, see `vectorbt.settings.portfolio`.
!!! hint
If you generated signals using close price, don't forget to shift your signals by one tick
forward, for example, with `signals.vbt.fshift(1)`. In general, make sure to use a price
that comes after the signal.
Also see notes and hints for `Portfolio.from_orders`.
## Example
Some of the ways signals can be interpreted:
```python-repl
>>> import pandas as pd
>>> import vectorbt as vbt
>>> close = pd.Series([1, 2, 3, 4, 5])
>>> entries = pd.Series([True, True, True, False, False])
>>> exits = pd.Series([False, False, True, True, True])
>>> # Entry opens long, exit closes long
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='longonly')
>>> portfolio.share_flow()
0 1.0
1 0.0
2 0.0
3 -1.0
4 0.0
dtype: float64
>>> # Entry opens short, exit closes short
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='shortonly')
>>> portfolio.share_flow()
0 -1.0
1 0.0
2 0.0
3 1.0
4 0.0
dtype: float64
>>> # Entry opens long and closes short, exit closes long and opens short
>>> # Reversal within one tick
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='all')
>>> portfolio.share_flow()
0 1.0
1 0.0
2 0.0
3 -2.0
4 0.0
dtype: float64
>>> # Reversal within two ticks
>>> # First signal closes position, second signal opens the opposite one
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='all',
... close_first=True)
>>> portfolio.share_flow()
0 1.0
1 0.0
2 0.0
3 -1.0
4 -1.0
dtype: float64
>>> # If entry and exit, chooses exit
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='all',
... close_first=True, conflict_mode='exit')
>>> portfolio.share_flow()
0 1.0
1 0.0
2 -1.0
3 -1.0
4 0.0
dtype: float64
>>> # Entry means long order, exit means short order
>>> # Acts similar to `from_orders`
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='all',
... accumulate=True)
>>> portfolio.share_flow()
0 1.0
1 1.0
2 0.0
3 -1.0
4 -1.0
dtype: float64
>>> # Testing multiple parameters (via broadcasting)
>>> from vectorbt.portfolio.enums import Direction
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, direction=[list(Direction)],
... broadcast_kwargs=dict(columns_from=Direction._fields))
>>> portfolio.share_flow()
Long Short All
0 100.0 -100.0 100.0
1 0.0 0.0 0.0
2 0.0 0.0 0.0
3 -100.0 50.0 -200.0
4 0.0 0.0 0.0
```
"""
# Get defaults
from vectorbt import settings
if size is None:
size = settings.portfolio['size']
if size_type is None:
size_type = settings.portfolio['signal_size_type']
size_type = convert_str_enum_value(SizeType, size_type)
if direction is None:
direction = settings.portfolio['signal_direction']
direction = convert_str_enum_value(Direction, direction)
if price is None:
price = close
if fees is None:
fees = settings.portfolio['fees']
if fixed_fees is None:
fixed_fees = settings.portfolio['fixed_fees']
if slippage is None:
slippage = settings.portfolio['slippage']
if min_size is None:
min_size = settings.portfolio['min_size']
if max_size is None:
max_size = settings.portfolio['max_size']
if reject_prob is None:
reject_prob = settings.portfolio['reject_prob']
if allow_partial is None:
allow_partial = settings.portfolio['allow_partial']
if raise_reject is None:
raise_reject = settings.portfolio['raise_reject']
if log is None:
log = settings.portfolio['log']
if accumulate is None:
accumulate = settings.portfolio['accumulate']
if conflict_mode is None:
conflict_mode = settings.portfolio['conflict_mode']
conflict_mode = convert_str_enum_value(ConflictMode, conflict_mode)
if close_first is None:
close_first = settings.portfolio['close_first']
if val_price is None:
if price is None:
if checks.is_pandas(close):
val_price = close.vbt.fshift(1)
else:
val_price = np.require(close, dtype=np.float_)
val_price = np.roll(val_price, 1, axis=0)
val_price[0] = np.nan
else:
val_price = price
if init_cash is None:
init_cash = settings.portfolio['init_cash']
init_cash = convert_str_enum_value(InitCashMode, init_cash)
if isinstance(init_cash, int) and init_cash in InitCashMode:
init_cash_mode = init_cash
init_cash = np.inf
else:
init_cash_mode = None
if cash_sharing is None:
cash_sharing = settings.portfolio['cash_sharing']
if call_seq is None:
call_seq = settings.portfolio['call_seq']
call_seq = convert_str_enum_value(CallSeqType, call_seq)
auto_call_seq = False
if isinstance(call_seq, int):
if call_seq == CallSeqType.Auto:
call_seq = CallSeqType.Default
auto_call_seq = True
if seed is None:
seed = settings.portfolio['seed']
if seed is not None:
set_seed(seed)
if freq is None:
freq = settings.portfolio['freq']
if broadcast_kwargs is None:
broadcast_kwargs = {}
if wrapper_kwargs is None:
wrapper_kwargs = {}
if not wrapper_kwargs.get('group_select', True) and cash_sharing:
raise ValueError("group_select cannot be disabled if cash_sharing=True")
# Broadcast inputs
# Only close is broadcast, others can remain unchanged thanks to flexible indexing
broadcastable_args = (
close,
entries,
exits,
size,
size_type,
direction,
price,
fees,
fixed_fees,
slippage,
min_size,
max_size,
reject_prob,
allow_partial,
raise_reject,
accumulate,
log,
conflict_mode,
close_first,
val_price
)
keep_raw = [False] + [True] * (len(broadcastable_args) - 1)
broadcast_kwargs = merge_dicts(dict(require_kwargs=dict(requirements='W')), broadcast_kwargs)
broadcasted_args = broadcast(*broadcastable_args, **broadcast_kwargs, keep_raw=keep_raw)
close = broadcasted_args[0]
if not checks.is_pandas(close):
close = pd.Series(close) if close.ndim == 1 else pd.DataFrame(close)
target_shape_2d = (close.shape[0], close.shape[1] if close.ndim > 1 else 1)
wrapper = ArrayWrapper.from_obj(close, freq=freq, group_by=group_by, **wrapper_kwargs)
cs_group_lens = wrapper.grouper.get_group_lens(group_by=None if cash_sharing else False)
init_cash = np.require(np.broadcast_to(init_cash, (len(cs_group_lens),)), dtype=np.float_)
group_lens = wrapper.grouper.get_group_lens(group_by=group_by)
if checks.is_array(call_seq):
call_seq = nb.require_call_seq(broadcast(call_seq, to_shape=target_shape_2d, to_pd=False))
else:
call_seq = nb.build_call_seq(target_shape_2d, group_lens, call_seq_type=call_seq)
if max_orders is None:
max_orders = target_shape_2d[0] * target_shape_2d[1]
if max_logs is None:
max_logs = target_shape_2d[0] * target_shape_2d[1]
if not np.any(log):
max_logs = 1
# Perform calculation
order_records, log_records = nb.simulate_from_signals_nb(
target_shape_2d,
cs_group_lens, # group only if cash sharing is enabled to speed up
init_cash,
call_seq,
auto_call_seq,
*broadcasted_args[1:],
max_orders,
max_logs,
close.ndim == 2
)
# Create an instance
return cls(
wrapper,
close,
order_records,
log_records,
init_cash if init_cash_mode is None else init_cash_mode,
cash_sharing,
call_seq,
**kwargs
)
@classmethod
def from_orders(cls, close, size, size_type=None, direction=None, price=None, fees=None,
fixed_fees=None, slippage=None, min_size=None, max_size=None, reject_prob=None,
allow_partial=None, raise_reject=None, log=None, val_price=None, init_cash=None,
cash_sharing=None, call_seq=None, max_orders=None, max_logs=None, seed=None,
group_by=None, broadcast_kwargs=None, wrapper_kwargs=None, freq=None, **kwargs):
"""Simulate portfolio from orders.
Starting with initial cash `init_cash`, orders the number of shares specified in `size`
for `price`.
Args:
close (array_like): Reference price, such as close.
Will broadcast.
Will be used for calculating unrealized P&L and portfolio value.
size (float or array_like): Size to order.
Will broadcast.
Behavior depends upon `size_type` and `direction`. For `SizeType.Shares`:
* Set to any number to buy/sell some fixed amount of shares.
Longs are limited by cash in the account, while shorts are unlimited.
* Set to `np.inf` to buy shares for all cash, or `-np.inf` to sell shares for
initial margin of 100%. If `direction` is not `all`, `-np.inf` will close the position.
* Set to `np.nan` or 0 to skip.
For any target size:
* Set to any number to buy/sell amount of shares relative to current holdings or value.
* Set to 0 to close the current position.
* Set to `np.nan` to skip.
size_type (SizeType or array_like): See `vectorbt.portfolio.enums.SizeType`.
Will broadcast.
!!! note
`SizeType.Percent` does not support position reversal. Switch to a single direction.
!!! warning
Be cautious using `SizeType.Percent` with `call_seq` set to 'auto'.
To execute sell orders before buy orders, the value of each order in the group
needs to be approximated in advance. But since `SizeType.Percent` depends
upon cash balance, which cannot be calculated in advance, the latest cash balance
is used. This can yield a wrong call sequence for buy orders.
direction (Direction or array_like): See `vectorbt.portfolio.enums.Direction`.
Will broadcast.
price (array_like of float): Order price.
Defaults to `close`. Will broadcast.
fees (float or array_like): Fees in percentage of the order value.
Will broadcast. Note that 0.01 = 1%.
fixed_fees (float or array_like): Fixed amount of fees to pay per order.
Will broadcast.
slippage (float or array_like): Slippage in percentage of price.
Will broadcast. Note that 0.01 = 1%.
min_size (float or array_like): Minimum size for an order to be accepted.
Will broadcast.
max_size (float or array_like): Maximum size for an order.
Will broadcast.
Will be partially filled if exceeded.
reject_prob (float or array_like): Order rejection probability.
Will broadcast.
allow_partial (bool or array_like): Whether to allow partial fills.
Will broadcast.
Does not apply when size is `np.inf`.
raise_reject (bool or array_like): Whether to raise an exception if order gets rejected.
Will broadcast.
log (bool or array_like): Whether to log orders.
Will broadcast.
val_price (array_like of float): Asset valuation price.
Defaults to `price`. Will broadcast.
Used at the time of decision making to calculate value of each asset in the group,
for example, to convert target value into target shares.
!!! note
Make sure to use timestamp for `val_price` that comes before timestamps of
all orders in the group with cash sharing (previous `close` for example),
otherwise you're cheating yourself.
init_cash (InitCashMode, float or array_like of float): Initial capital.
See `init_cash` in `Portfolio.from_order_func`.
cash_sharing (bool): Whether to share cash within the same group.
!!! warning
Introduces cross-asset dependencies.
This method presumes that in a group of assets that share the same capital all
orders will be executed within the same tick and retain their price regardless
of their position in the queue, even though they depend upon each other and thus
cannot be executed in parallel.
call_seq (CallSeqType or array_like of int): Default sequence of calls per row and group.
Each value in this sequence should indicate the position of column in the group to
call next. Processing of `call_seq` goes always from left to right.
For example, `[2, 0, 1]` would first call column 'c', then 'a', and finally 'b'.
* Use `vectorbt.portfolio.enums.CallSeqType` to select a sequence type.
* Set to array to specify custom sequence. Will not broadcast.
If `CallSeqType.Auto` selected, rearranges calls dynamically based on order value.
Calculates value of all orders per row and group, and sorts them by this value.
Sell orders will be executed first to release funds for buy orders.
!!! warning
`CallSeqType.Auto` should be used with caution:
* It not only presumes that order prices are known beforehand, but also that
orders can be executed in arbitrary order and still retain their price.
In reality, this is hardly the case: after processing one asset, some time
has passed and the price for other assets might have already changed.
* Even if you're able to specify a slippage large enough to compensate for
this behavior, slippage itself should depend upon execution order.
This method doesn't let you do that.
* If one order is rejected, the method may still execute the next orders and possibly
leave them without the required funds.
For more control, use `Portfolio.from_order_func`.
max_orders (int): Size of the order records array.
Defaults to the number of elements in the broadcasted shape.
Set to a lower number if you run out of memory.
max_logs (int): Size of the log records array.
Defaults to the number of elements in the broadcasted shape if any element of `log` is True,
otherwise to 1.
Set to a lower number if you run out of memory.
seed (int): Seed to be set for both `call_seq` and at the beginning of the simulation.
group_by (any): Group columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
broadcast_kwargs (dict): Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
wrapper_kwargs (dict): Keyword arguments passed to `vectorbt.base.array_wrapper.ArrayWrapper`.
freq (any): Index frequency in case `close.index` is not datetime-like.
**kwargs: Keyword arguments passed to the `__init__` method.
All broadcastable arguments will be broadcast using `vectorbt.base.reshape_fns.broadcast`
but keep original shape to utilize flexible indexing and to save memory.
For defaults, see `vectorbt.settings.portfolio`.
!!! note
When `call_seq` is not `CallSeqType.Auto`, at each timestamp, processing of the assets in
a group goes strictly in order defined in `call_seq`. This order can't be changed dynamically.
This has one big implication for this particular method: the last asset in the call stack
cannot be processed until other assets are processed. This is the reason why rebalancing
cannot work properly in this setting: one has to specify percentages for all assets beforehand
and then tweak the processing order to sell to-be-sold assets first in order to release funds
for to-be-bought assets. This can be automatically done by using `CallSeqType.Auto`.
!!! hint
All broadcastable arguments can be set per frame, series, row, column, or element.
## Example
Buy 10 shares each tick:
```python-repl
>>> import pandas as pd
>>> import vectorbt as vbt
>>> close = pd.Series([1, 2, 3, 4, 5])
>>> portfolio = vbt.Portfolio.from_orders(close, 10)
>>> portfolio.shares()
0 10.0
1 20.0
2 30.0
3 40.0
4 40.0
dtype: float64
>>> portfolio.cash()
0 90.0
1 70.0
2 40.0
3 0.0
4 0.0
dtype: float64
```
Reverse each position by first closing it:
```python-repl
>>> size = [1, 0, -1, 0, 1]
>>> portfolio = vbt.Portfolio.from_orders(close, size, size_type='targetpercent')
>>> portfolio.shares()
0 100.000000
1 0.000000
2 -66.666667
3 0.000000
4 26.666667
dtype: float64
>>> portfolio.cash()
0 0.000000
1 200.000000
2 400.000000
3 133.333333
4 0.000000
dtype: float64
```
Equal-weighted portfolio as in the `vectorbt.portfolio.nb.simulate_nb` example (more compact, but with less control over execution):
```python-repl
>>> import numpy as np
>>> np.random.seed(42)
>>> close = pd.DataFrame(np.random.uniform(1, 10, size=(5, 3)))
>>> size = pd.Series(np.full(5, 1/3)) # each column 33.3%
>>> size[1::2] = np.nan # skip every second tick
>>> portfolio = vbt.Portfolio.from_orders(
... close, # acts both as reference and order price here
... size,
... size_type='targetpercent',
... call_seq='auto', # first sell then buy
... group_by=True, # one group
... cash_sharing=True, # assets share the same cash
... fees=0.001, fixed_fees=1., slippage=0.001 # costs
... )
>>> portfolio.holding_value(group_by=False).vbt.plot()
```

"""
# Get defaults
from vectorbt import settings
if size is None:
size = settings.portfolio['size']
if size_type is None:
size_type = settings.portfolio['size_type']
size_type = convert_str_enum_value(SizeType, size_type)
if direction is None:
direction = settings.portfolio['order_direction']
direction = convert_str_enum_value(Direction, direction)
if price is None:
price = close
if fees is None:
fees = settings.portfolio['fees']
if fixed_fees is None:
fixed_fees = settings.portfolio['fixed_fees']
if slippage is None:
slippage = settings.portfolio['slippage']
if min_size is None:
min_size = settings.portfolio['min_size']
if max_size is None:
max_size = settings.portfolio['max_size']
if reject_prob is None:
reject_prob = settings.portfolio['reject_prob']
if allow_partial is None:
allow_partial = settings.portfolio['allow_partial']
if raise_reject is None:
raise_reject = settings.portfolio['raise_reject']
if log is None:
log = settings.portfolio['log']
if val_price is None:
val_price = price
if init_cash is None:
init_cash = settings.portfolio['init_cash']
init_cash = convert_str_enum_value(InitCashMode, init_cash)
if isinstance(init_cash, int) and init_cash in InitCashMode:
init_cash_mode = init_cash
init_cash = np.inf
else:
init_cash_mode = None
if cash_sharing is None:
cash_sharing = settings.portfolio['cash_sharing']
if call_seq is None:
call_seq = settings.portfolio['call_seq']
call_seq = convert_str_enum_value(CallSeqType, call_seq)
auto_call_seq = False
if isinstance(call_seq, int):
if call_seq == CallSeqType.Auto:
call_seq = CallSeqType.Default
auto_call_seq = True
if seed is None:
seed = settings.portfolio['seed']
if seed is not None:
set_seed(seed)
if freq is None:
freq = settings.portfolio['freq']
if broadcast_kwargs is None:
broadcast_kwargs = {}
if wrapper_kwargs is None:
wrapper_kwargs = {}
if not wrapper_kwargs.get('group_select', True) and cash_sharing:
raise ValueError("group_select cannot be disabled if cash_sharing=True")
# Broadcast inputs
# Only close is broadcast, others can remain unchanged thanks to flexible indexing
broadcastable_args = (
close,
size,
size_type,
direction,
price,
fees,
fixed_fees,
slippage,
min_size,
max_size,
reject_prob,
allow_partial,
raise_reject,
log,
val_price
)
keep_raw = [False] + [True] * (len(broadcastable_args) - 1)
broadcast_kwargs = merge_dicts(dict(require_kwargs=dict(requirements='W')), broadcast_kwargs)
broadcasted_args = broadcast(*broadcastable_args, **broadcast_kwargs, keep_raw=keep_raw)
close = broadcasted_args[0]
if not checks.is_pandas(close):
close = pd.Series(close) if close.ndim == 1 else pd.DataFrame(close)
target_shape_2d = (close.shape[0], close.shape[1] if close.ndim > 1 else 1)
wrapper = ArrayWrapper.from_obj(close, freq=freq, group_by=group_by, **wrapper_kwargs)
cs_group_lens = wrapper.grouper.get_group_lens(group_by=None if cash_sharing else False)
init_cash = np.require(np.broadcast_to(init_cash, (len(cs_group_lens),)), dtype=np.float_)
group_lens = wrapper.grouper.get_group_lens(group_by=group_by)
if checks.is_array(call_seq):
call_seq = nb.require_call_seq(broadcast(call_seq, to_shape=target_shape_2d, to_pd=False))
else:
call_seq = nb.build_call_seq(target_shape_2d, group_lens, call_seq_type=call_seq)
if max_orders is None:
max_orders = target_shape_2d[0] * target_shape_2d[1]
if max_logs is None:
max_logs = target_shape_2d[0] * target_shape_2d[1]
if not np.any(log):
max_logs = 1
# Perform calculation
order_records, log_records = nb.simulate_from_orders_nb(
target_shape_2d,
cs_group_lens, # group only if cash sharing is enabled to speed up
init_cash,
call_seq,
auto_call_seq,
*broadcasted_args[1:],
max_orders,
max_logs,
close.ndim == 2
)
# Create an instance
return cls(
wrapper,
close,
order_records,
log_records,
init_cash if init_cash_mode is None else init_cash_mode,
cash_sharing,
call_seq,
**kwargs
)
@classmethod
def from_order_func(cls, close, order_func_nb, *order_args, target_shape=None, keys=None,
init_cash=None, cash_sharing=None, call_seq=None, active_mask=None,
prep_func_nb=None, prep_args=None, group_prep_func_nb=None, group_prep_args=None,
row_prep_func_nb=None, row_prep_args=None, segment_prep_func_nb=None,
segment_prep_args=None, row_wise=None, max_orders=None, max_logs=None,
seed=None, group_by=None, broadcast_kwargs=None, wrapper_kwargs=None, freq=None, **kwargs):
"""Build portfolio from a custom order function.
For details, see `vectorbt.portfolio.nb.simulate_nb`.
If `row_wise` is True, also see `vectorbt.portfolio.nb.simulate_row_wise_nb`.
Args:
close (array_like): Reference price, such as close.
Will broadcast to `target_shape`.
Will be used for calculating unrealized P&L and portfolio value.
order_func_nb (callable): Order generation function.
*order_args: Arguments passed to `order_func_nb`.
target_shape (tuple): Target shape to iterate over. Defaults to `close.shape`.
keys (sequence): Outermost column level.
Each element should correspond to one iteration over columns in `close`.
Should be set only if `target_shape` is bigger than `close.shape`.
init_cash (InitCashMode, float or array_like of float): Initial capital.
By default, will broadcast to the number of columns.
If cash sharing is enabled, will broadcast to the number of groups.
See `vectorbt.portfolio.enums.InitCashMode` to find optimal initial cash.
!!! note
Mode `InitCashMode.AutoAlign` is applied after the portfolio is initialized
to set the same initial cash for all columns/groups. Changing grouping
will change the initial cash, so be aware when indexing.
cash_sharing (bool): Whether to share cash within the same group.
!!! warning
Introduces cross-asset dependencies.
call_seq (CallSeqType or array_like of int): Default sequence of calls per row and group.
* Use `vectorbt.portfolio.enums.CallSeqType` to select a sequence type.
* Set to array to specify custom sequence. Will not broadcast.
!!! note
CallSeqType.Auto should be implemented manually.
Use `auto_call_seq_ctx_nb` in `segment_prep_func_nb`.
active_mask (int or array_like of bool): Mask of whether a particular segment should be executed.
Supplying an integer will activate every n-th row (just for convenience).
Supplying a boolean will broadcast to the number of rows and groups.
prep_func_nb (callable): Simulation preparation function.
prep_args (tuple): Packed arguments passed to `prep_func_nb`.
Defaults to `()`.
group_prep_func_nb (callable): Group preparation function.
Called only if `row_wise` is False.
group_prep_args (tuple): Packed arguments passed to `group_prep_func_nb`.
Defaults to `()`.
row_prep_func_nb (callable): Row preparation function.
Called only if `row_wise` is True.
row_prep_args (tuple): Packed arguments passed to `row_prep_func_nb`.
Defaults to `()`.
segment_prep_func_nb (callable): Segment preparation function.
segment_prep_args (tuple): Packed arguments passed to `segment_prep_func_nb`.
Defaults to `()`.
row_wise (bool): Whether to iterate over rows rather than columns/groups.
See `vectorbt.portfolio.nb.simulate_row_wise_nb`.
max_orders (int): Size of the order records array.
Defaults to the number of elements in the broadcasted shape.
Set to a lower number if you run out of memory.
max_logs (int): Size of the log records array.
Defaults to the number of elements in the broadcasted shape.
Set to a lower number if you run out of memory.
seed (int): Seed to be set for both `call_seq` and at the beginning of the simulation.
group_by (any): Group columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
broadcast_kwargs (dict): Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
wrapper_kwargs (dict): Keyword arguments passed to `vectorbt.base.array_wrapper.ArrayWrapper`.
freq (any): Index frequency in case `close.index` is not datetime-like.
**kwargs: Keyword arguments passed to the `__init__` method.
For defaults, see `vectorbt.settings.portfolio`.
!!! note
All passed functions should be Numba-compiled.
Objects passed as arguments to both functions will not broadcast to `target_shape`
as their purpose is unknown. You should broadcast manually or use flexible indexing.
Also see notes on `Portfolio.from_orders`.
!!! note
In contrast to other methods, the valuation price is the previous `close`
instead of the order price, since the price of an order is unknown before the call.
You can still set valuation price explicitly in `segment_prep_func_nb`.
## Example
Buy 10 shares each tick:
```python-repl
>>> import pandas as pd
>>> from numba import njit
>>> import vectorbt as vbt
>>> from vectorbt.portfolio.nb import create_order_nb
>>> @njit
... def order_func_nb(oc, size):
... return create_order_nb(size=size, price=oc.close[oc.i, oc.col])
>>> close = pd.Series([1, 2, 3, 4, 5])
>>> portfolio = vbt.Portfolio.from_order_func(close, order_func_nb, 10)
>>> portfolio.shares()
0 10.0
1 20.0
2 30.0
3 40.0
4 40.0
dtype: float64
>>> portfolio.cash()
0 90.0
1 70.0
2 40.0
3 0.0
4 0.0
dtype: float64
```
Reverse each position by first closing it. Keep state of last position to determine
which position to open next (just as an example, there are easier ways to do this):
```python-repl
>>> import numpy as np
>>> @njit
... def group_prep_func_nb(gc):
... last_pos_state = np.array([-1])
... return (last_pos_state,)
>>> @njit
... def order_func_nb(oc, last_pos_state):
... if oc.shares_now > 0:
... size = -oc.shares_now # close long
... elif oc.shares_now < 0:
... size = -oc.shares_now # close short
... else:
... if last_pos_state[0] == 1:
... size = -np.inf # open short
... last_pos_state[0] = -1
... else:
... size = np.inf # open long
... last_pos_state[0] = 1
...
... return create_order_nb(size=size, price=oc.close[oc.i, oc.col])
>>> portfolio = vbt.Portfolio.from_order_func(
... close, order_func_nb, group_prep_func_nb=group_prep_func_nb)
>>> portfolio.shares()
0 100.0
1 0.0
2 -100.0
3 0.0
4 20.0
dtype: float64
>>> portfolio.cash()
0 0.0
1 200.0
2 500.0
3 100.0
4 0.0
dtype: float64
```
Equal-weighted portfolio as in `vectorbt.portfolio.nb.simulate_nb` example:
```python-repl
>>> from vectorbt.portfolio.nb import auto_call_seq_ctx_nb
>>> from vectorbt.portfolio.enums import SizeType, Direction
>>> @njit
... def group_prep_func_nb(gc):
... '''Define empty arrays for each group.'''
... size = np.empty(gc.group_len, dtype=np.float_)
... size_type = np.empty(gc.group_len, dtype=np.int_)
... direction = np.empty(gc.group_len, dtype=np.int_)
... temp_float_arr = np.empty(gc.group_len, dtype=np.float_)
... return size, size_type, direction, temp_float_arr
>>> @njit
... def segment_prep_func_nb(sc, size, size_type, direction, temp_float_arr):
... '''Perform rebalancing at each segment.'''
... for k in range(sc.group_len):
... col = sc.from_col + k
... size[k] = 1 / sc.group_len
... size_type[k] = SizeType.TargetPercent
... direction[k] = Direction.LongOnly
... sc.last_val_price[col] = sc.close[sc.i, col]
... auto_call_seq_ctx_nb(sc, size, size_type, direction, temp_float_arr)
... return size, size_type, direction
>>> @njit
... def order_func_nb(oc, size, size_type, direction, fees, fixed_fees, slippage):
... '''Place an order.'''
... col_i = oc.call_seq_now[oc.call_idx]
... return create_order_nb(
... size=size[col_i],
... size_type=size_type[col_i],
... price=oc.close[oc.i, oc.col],
... fees=fees, fixed_fees=fixed_fees, slippage=slippage,
... direction=direction[col_i]
... )
>>> np.random.seed(42)
>>> close = np.random.uniform(1, 10, size=(5, 3))
>>> fees = 0.001
>>> fixed_fees = 1.
>>> slippage = 0.001
>>> portfolio = vbt.Portfolio.from_order_func(
... close, # acts both as reference and order price here
... order_func_nb, fees, fixed_fees, slippage, # order_args as *args
... active_mask=2, # rebalance every second tick
... group_prep_func_nb=group_prep_func_nb,
... segment_prep_func_nb=segment_prep_func_nb,
... cash_sharing=True, group_by=True, # one group with cash sharing
... )
>>> portfolio.holding_value(group_by=False).vbt.plot()
```

"""
# Get defaults
from vectorbt import settings
if not checks.is_pandas(close):
if not checks.is_array(close):
close = np.asarray(close)
close = pd.Series(close) if close.ndim == 1 else pd.DataFrame(close)
if target_shape is None:
target_shape = close.shape
if init_cash is None:
init_cash = settings.portfolio['init_cash']
init_cash = convert_str_enum_value(InitCashMode, init_cash)
if isinstance(init_cash, int) and init_cash in InitCashMode:
init_cash_mode = init_cash
init_cash = np.inf
else:
init_cash_mode = None
if cash_sharing is None:
cash_sharing = settings.portfolio['cash_sharing']
if call_seq is None:
call_seq = settings.portfolio['call_seq']
call_seq = convert_str_enum_value(CallSeqType, call_seq)
if isinstance(call_seq, int):
if call_seq == CallSeqType.Auto:
raise ValueError("CallSeqType.Auto should be implemented manually. "
"Use auto_call_seq_ctx_nb in segment_prep_func_nb.")
if active_mask is None:
active_mask = True
if row_wise is None:
row_wise = settings.portfolio['row_wise']
if seed is None:
seed = settings.portfolio['seed']
if seed is not None:
set_seed(seed)
if freq is None:
freq = settings.portfolio['freq']
if broadcast_kwargs is None:
broadcast_kwargs = {}
require_kwargs = dict(require_kwargs=dict(requirements='W'))
broadcast_kwargs = merge_dicts(require_kwargs, broadcast_kwargs)
if wrapper_kwargs is None:
wrapper_kwargs = {}
if not wrapper_kwargs.get('group_select', True) and cash_sharing:
raise ValueError("group_select cannot be disabled if cash_sharing=True")
# Broadcast inputs
target_shape_2d = (target_shape[0], target_shape[1] if len(target_shape) > 1 else 1)
if close.shape != target_shape:
if len(close.vbt.wrapper.columns) <= target_shape_2d[1]:
if target_shape_2d[1] % len(close.vbt.wrapper.columns) != 0:
raise ValueError("Cannot broadcast close to target_shape")
if keys is None:
keys = pd.Index(np.arange(target_shape_2d[1]), name='iteration_idx')
tile_times = target_shape_2d[1] // len(close.vbt.wrapper.columns)
close = close.vbt.tile(tile_times, keys=keys)
close = broadcast(close, to_shape=target_shape, **broadcast_kwargs)
wrapper = ArrayWrapper.from_obj(close, freq=freq, group_by=group_by, **wrapper_kwargs)
cs_group_lens = wrapper.grouper.get_group_lens(group_by=None if cash_sharing else False)
init_cash = np.require(np.broadcast_to(init_cash, (len(cs_group_lens),)), dtype=np.float_)
group_lens = wrapper.grouper.get_group_lens(group_by=group_by)
if isinstance(active_mask, int):
_active_mask = np.full((target_shape_2d[0], len(group_lens)), False)
_active_mask[0::active_mask] = True
active_mask = _active_mask
else:
active_mask = broadcast(
active_mask,
to_shape=(target_shape_2d[0], len(group_lens)),
to_pd=False,
**require_kwargs
)
if checks.is_array(call_seq):
call_seq = nb.require_call_seq(broadcast(call_seq, to_shape=target_shape_2d, to_pd=False))
else:
call_seq = nb.build_call_seq(target_shape_2d, group_lens, call_seq_type=call_seq)
if max_orders is None:
max_orders = target_shape_2d[0] * target_shape_2d[1]
if max_logs is None:
max_logs = target_shape_2d[0] * target_shape_2d[1]
# Prepare arguments
if prep_func_nb is None:
prep_func_nb = nb.empty_prep_nb
if prep_args is None:
prep_args = ()
if group_prep_func_nb is None:
group_prep_func_nb = nb.empty_prep_nb
if group_prep_args is None:
group_prep_args = ()
if row_prep_func_nb is None:
row_prep_func_nb = nb.empty_prep_nb
if row_prep_args is None:
row_prep_args = ()
if segment_prep_func_nb is None:
segment_prep_func_nb = nb.empty_prep_nb
if segment_prep_args is None:
segment_prep_args = ()
# Perform calculation
if row_wise:
order_records, log_records = nb.simulate_row_wise_nb(
target_shape_2d,
to_2d(close, raw=True),
group_lens,
init_cash,
cash_sharing,
call_seq,
active_mask,
prep_func_nb,
prep_args,
row_prep_func_nb,
row_prep_args,
segment_prep_func_nb,
segment_prep_args,
order_func_nb,
order_args,
max_orders,
max_logs
)
else:
order_records, log_records = nb.simulate_nb(
target_shape_2d,
to_2d(close, raw=True),
group_lens,
init_cash,
cash_sharing,
call_seq,
active_mask,
prep_func_nb,
prep_args,
group_prep_func_nb,
group_prep_args,
segment_prep_func_nb,
segment_prep_args,
order_func_nb,
order_args,
max_orders,
max_logs
)
# Create an instance
return cls(
wrapper,
close,
order_records,
log_records,
init_cash if init_cash_mode is None else init_cash_mode,
cash_sharing,
call_seq,
**kwargs
)
# ############# Properties ############# #
@property
def wrapper(self):
"""Array wrapper."""
if self.cash_sharing:
# Allow only disabling grouping when needed (but not globally, see regroup)
return self._wrapper.copy(
allow_enable=False,
allow_modify=False
)
return self._wrapper
def regroup(self, group_by, **kwargs):
"""Regroup this object.
See `vectorbt.base.array_wrapper.Wrapping.regroup`."""
if self.cash_sharing:
if self.wrapper.grouper.is_grouping_modified(group_by=group_by):
raise ValueError("Cannot modify grouping globally when cash_sharing=True")
return Wrapping.regroup(self, group_by, **kwargs)
@property
def cash_sharing(self):
"""Whether to share cash within the same group."""
return self._cash_sharing
@property
def call_seq(self, wrap_kwargs=None):
"""Sequence of calls per row and group."""
return self.wrapper.wrap(self._call_seq, group_by=False, **merge_dicts({}, wrap_kwargs))
@property
def incl_unrealized(self):
"""Whether to include unrealized trade P&L in statistics."""
return self._incl_unrealized
@property
def use_filled_close(self):
"""Whether to forward-backward fill NaN values in `Portfolio.close`."""
return self._use_filled_close
# ############# Reference price ############# #
@property
def close(self):
"""Price per share series."""
return self._close
@cached_method
def fill_close(self, ffill=True, bfill=True, wrap_kwargs=None):
"""Fill NaN values of `Portfolio.close`.
Use `ffill` and `bfill` to fill forwards and backwards respectively."""
close = to_2d(self.close, raw=True)
if ffill and np.any(np.isnan(close[-1, :])):
close = generic_nb.ffill_nb(close)
if bfill and np.any(np.isnan(close[0, :])):
close = generic_nb.ffill_nb(close[::-1, :])[::-1, :]
return self.wrapper.wrap(close, group_by=False, **merge_dicts({}, wrap_kwargs))
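# Illustrative sketch (not part of the original source): filling only replaces
# NaNs in `close`; simulation results are unaffected. Given the ffill-then-bfill
# logic above, a series like [NaN, 2, NaN, 4, NaN] becomes [2, 2, 2, 4, 4]:
#
# >>> import numpy as np, pandas as pd, vectorbt as vbt
# >>> close = pd.Series([np.nan, 2., np.nan, 4., np.nan])
# >>> vbt.Portfolio.from_holding(close).fill_close()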
# ############# Records ############# #
@property
def order_records(self):
"""A structured NumPy array of order records."""
return self._order_records
@cached_property
def orders(self):
"""`Portfolio.get_orders` with default arguments."""
return Orders(self.wrapper, self.order_records, self.close)
def get_orders(self, group_by=None):
"""Get order records.
See `vectorbt.portfolio.orders.Orders`."""
return self.orders.regroup(group_by=group_by)
@property
def log_records(self):
"""A structured NumPy array of log records."""
return self._log_records
@cached_property
def logs(self):
"""`Portfolio.get_logs` with default arguments."""
return Logs(self.wrapper, self.log_records)
def get_logs(self, group_by=None):
"""Get log records.
See `vectorbt.portfolio.logs.Logs`."""
return self.logs.regroup(group_by=group_by)
@cached_property
def trades(self):
"""`Portfolio.get_trades` with default arguments."""
return Trades.from_orders(self.orders)
def get_trades(self, group_by=None):
"""Get trade records.
See `vectorbt.portfolio.trades.Trades`."""
return self.trades.regroup(group_by=group_by)
@cached_property
def positions(self):
"""`Portfolio.get_positions` with default arguments."""
return Positions.from_trades(self.trades)
def get_positions(self, group_by=None):
"""Get position records.
See `vectorbt.portfolio.trades.Positions`."""
return self.positions.regroup(group_by=group_by)
@cached_property
def drawdowns(self):
"""`Portfolio.get_drawdowns` with default arguments."""
return self.get_drawdowns()
@cached_method
def get_drawdowns(self, **kwargs):
"""Get drawdown records from `Portfolio.value`.
See `vectorbt.generic.drawdowns.Drawdowns`.
`**kwargs` are passed to `Portfolio.value`."""
return Drawdowns.from_ts(self.value(**kwargs), freq=self.wrapper.freq)
# ############# Shares ############# #
@cached_method
def share_flow(self, direction='all', wrap_kwargs=None):
"""Get share flow series per column."""
direction = convert_str_enum_value(Direction, direction)
share_flow = nb.share_flow_nb(
self.wrapper.shape_2d,
self.orders.values,
self.orders.col_mapper.col_map,
direction
)
return self.wrapper.wrap(share_flow, group_by=False, **merge_dicts({}, wrap_kwargs))
@cached_method
def shares(self, direction='all', wrap_kwargs=None):
"""Get share series per column."""
direction = convert_str_enum_value(Direction, direction)
share_flow = to_2d(self.share_flow(direction='all'), raw=True)
shares = nb.shares_nb(share_flow)
if direction == Direction.LongOnly:
shares = np.where(shares > 0, shares, 0.)
if direction == Direction.ShortOnly:
shares = np.where(shares < 0, -shares, 0.)
return self.wrapper.wrap(shares, group_by=False, **merge_dicts({}, wrap_kwargs))
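# Illustrative sketch (not part of the original source): `direction` only filters
# the sign of the share series.
#
# >>> portfolio.shares()                       # net holdings, signed
# >>> portfolio.shares(direction='longonly')   # negative holdings clipped to 0
# >>> portfolio.shares(direction='shortonly')  # short holdings as positive sizes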
@cached_method
def pos_mask(self, direction='all', group_by=None, wrap_kwargs=None):
"""Get position mask per column/group."""
direction = convert_str_enum_value(Direction, direction)
shares = to_2d(self.shares(direction=direction), raw=True)
if self.wrapper.grouper.is_grouped(group_by=group_by):
pos_mask = to_2d(self.pos_mask(direction=direction, group_by=False), raw=True)
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
pos_mask = nb.pos_mask_grouped_nb(pos_mask, group_lens)
else:
pos_mask = shares != 0
return self.wrapper.wrap(pos_mask, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def pos_coverage(self, direction='all', group_by=None, wrap_kwargs=None):
"""Get position coverage per column/group."""
direction = convert_str_enum_value(Direction, direction)
shares = to_2d(self.shares(direction=direction), raw=True)
if self.wrapper.grouper.is_grouped(group_by=group_by):
pos_mask = to_2d(self.pos_mask(direction=direction, group_by=False), raw=True)
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
pos_coverage = nb.pos_coverage_grouped_nb(pos_mask, group_lens)
else:
pos_coverage = np.mean(shares != 0, axis=0)
wrap_kwargs = merge_dicts(dict(name_or_index='pos_coverage'), wrap_kwargs)
return self.wrapper.wrap_reduced(pos_coverage, group_by=group_by, **wrap_kwargs)
# ############# Cash ############# #
@cached_method
def cash_flow(self, group_by=None, short_cash=True, wrap_kwargs=None):
"""Get cash flow series per column/group.
When `short_cash` is set to False, cash never goes above the initial level,
because an operation always costs money."""
if self.wrapper.grouper.is_grouped(group_by=group_by):
cash_flow = to_2d(self.cash_flow(group_by=False), raw=True)
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
cash_flow = nb.cash_flow_grouped_nb(cash_flow, group_lens)
else:
cash_flow = nb.cash_flow_nb(
self.wrapper.shape_2d,
self.orders.values,
self.orders.col_mapper.col_map,
short_cash
)
return self.wrapper.wrap(cash_flow, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_property
def init_cash(self):
"""`Portfolio.get_init_cash` with default arguments."""
return self.get_init_cash()
@cached_method
def get_init_cash(self, group_by=None, wrap_kwargs=None):
"""Initial amount of cash per column/group with default arguments.
!!! note
If initial cash is found automatically and no own cash is used throughout simulation
(for example, when shorting), initial cash will be set to 1 instead of 0 to
enable smooth calculation of returns."""
if isinstance(self._init_cash, int):
cash_flow = to_2d(self.cash_flow(group_by=group_by), raw=True)
cash_min = np.min(np.cumsum(cash_flow, axis=0), axis=0)
init_cash = np.where(cash_min < 0, np.abs(cash_min), 1.)
if self._init_cash == InitCashMode.AutoAlign:
init_cash = np.full(init_cash.shape, np.max(init_cash))
else:
init_cash = to_1d(self._init_cash, raw=True)
if self.wrapper.grouper.is_grouped(group_by=group_by):
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
init_cash = nb.init_cash_grouped_nb(init_cash, group_lens, self.cash_sharing)
else:
group_lens = self.wrapper.grouper.get_group_lens()
init_cash = nb.init_cash_nb(init_cash, group_lens, self.cash_sharing)
wrap_kwargs = merge_dicts(dict(name_or_index='init_cash'), wrap_kwargs)
return self.wrapper.wrap_reduced(init_cash, group_by=group_by, **wrap_kwargs)
@cached_method
def cash(self, group_by=None, in_sim_order=False, short_cash=True, wrap_kwargs=None):
"""Get cash balance series per column/group."""
if in_sim_order and not self.cash_sharing:
raise ValueError("Cash sharing must be enabled for in_sim_order=True")
cash_flow = to_2d(self.cash_flow(group_by=group_by, short_cash=short_cash), raw=True)
if self.wrapper.grouper.is_grouped(group_by=group_by):
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
init_cash = to_1d(self.get_init_cash(group_by=group_by), raw=True)
cash = nb.cash_grouped_nb(
self.wrapper.shape_2d,
cash_flow,
group_lens,
init_cash
)
else:
group_lens = self.wrapper.grouper.get_group_lens()
if self.wrapper.grouper.is_grouping_disabled(group_by=group_by) and in_sim_order:
init_cash = to_1d(self.init_cash, raw=True)
call_seq = to_2d(self.call_seq, raw=True)
cash = nb.cash_in_sim_order_nb(cash_flow, group_lens, init_cash, call_seq)
else:
init_cash = to_1d(self.get_init_cash(group_by=False), raw=True)
cash = nb.cash_nb(cash_flow, group_lens, init_cash)
return self.wrapper.wrap(cash, group_by=group_by, **merge_dicts({}, wrap_kwargs))
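# Illustrative sketch (not part of the original source): cash can be queried per
# group or per column; `in_sim_order=True` additionally requires cash sharing.
#
# >>> portfolio.cash()                # per column, or per group if grouped
# >>> portfolio.cash(group_by=False)  # always per column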
# ############# Performance ############# #
@cached_method
def holding_value(self, direction='all', group_by=None, wrap_kwargs=None):
"""Get holding value series per column/group."""
direction = convert_str_enum_value(Direction, direction)
close = to_2d(self.close, raw=True).copy()
shares = to_2d(self.shares(direction=direction), raw=True)
close[shares == 0] = 0. # for price being NaN
if self.wrapper.grouper.is_grouped(group_by=group_by):
holding_value = to_2d(self.holding_value(direction=direction, group_by=False), raw=True)
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
holding_value = nb.holding_value_grouped_nb(holding_value, group_lens)
else:
holding_value = nb.holding_value_nb(close, shares)
return self.wrapper.wrap(holding_value, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def gross_exposure(self, direction='all', group_by=None, wrap_kwargs=None):
"""Get gross exposure."""
holding_value = to_2d(self.holding_value(group_by=group_by, direction=direction), raw=True)
cash = to_2d(self.cash(group_by=group_by, short_cash=False), raw=True)
gross_exposure = nb.gross_exposure_nb(holding_value, cash)
return self.wrapper.wrap(gross_exposure, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def net_exposure(self, group_by=None, wrap_kwargs=None):
"""Get net exposure."""
long_exposure = to_2d(self.gross_exposure(direction='longonly', group_by=group_by), raw=True)
short_exposure = to_2d(self.gross_exposure(direction='shortonly', group_by=group_by), raw=True)
net_exposure = long_exposure - short_exposure
return self.wrapper.wrap(net_exposure, group_by=group_by, **merge_dicts({}, wrap_kwargs))
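# Illustrative sketch (not part of the original source): per the code above, net
# exposure is the long gross exposure minus the short gross exposure, so for a
# long-only portfolio it coincides with its long gross exposure.
#
# >>> portfolio.gross_exposure(direction='longonly') - portfolio.gross_exposure(direction='shortonly')
# >>> portfolio.net_exposure()  # same result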
@cached_method
def value(self, group_by=None, in_sim_order=False, wrap_kwargs=None):
"""Get portfolio value series per column/group.
By default, will generate portfolio value for each asset based on cash flows and thus
independent from other assets, with initial cash and shares being that of the entire group.
Useful for generating returns and comparing assets within the same group.
When `group_by` is False and `in_sim_order` is True, returns value generated in
simulation order (see [row-major order](https://en.wikipedia.org/wiki/Row-_and_column-major_order)).
This value cannot be used for generating returns as-is. Useful to analyze how value
evolved throughout simulation."""
cash = to_2d(self.cash(group_by=group_by, in_sim_order=in_sim_order), raw=True)
holding_value = to_2d(self.holding_value(group_by=group_by), raw=True)
if self.wrapper.grouper.is_grouping_disabled(group_by=group_by) and in_sim_order:
group_lens = self.wrapper.grouper.get_group_lens()
call_seq = to_2d(self.call_seq, raw=True)
value = nb.value_in_sim_order_nb(cash, holding_value, group_lens, call_seq)
# price of NaN is already addressed by ungrouped_value_nb
else:
value = nb.value_nb(cash, holding_value)
return self.wrapper.wrap(value, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def total_profit(self, group_by=None, wrap_kwargs=None):
"""Get total profit per column/group.
Calculated directly from order records (fast).
Uses filled close if `Portfolio.use_filled_close` is True."""
if self.wrapper.grouper.is_grouped(group_by=group_by):
total_profit = to_1d(self.total_profit(group_by=False), raw=True)
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
total_profit = nb.total_profit_grouped_nb(
total_profit,
group_lens
)
else:
if self.use_filled_close:
close = to_2d(self.fill_close(), raw=True)
else:
close = to_2d(self.close, raw=True)
total_profit = nb.total_profit_nb(
self.wrapper.shape_2d,
close,
self.orders.values,
self.orders.col_mapper.col_map
)
wrap_kwargs = merge_dicts(dict(name_or_index='total_profit'), wrap_kwargs)
return self.wrapper.wrap_reduced(total_profit, group_by=group_by, **wrap_kwargs)
@cached_method
def final_value(self, group_by=None, wrap_kwargs=None):
"""Get total profit per column/group."""
init_cash = to_1d(self.get_init_cash(group_by=group_by), raw=True)
total_profit = to_1d(self.total_profit(group_by=group_by), raw=True)
final_value = nb.final_value_nb(total_profit, init_cash)
wrap_kwargs = merge_dicts(dict(name_or_index='final_value'), wrap_kwargs)
return self.wrapper.wrap_reduced(final_value, group_by=group_by, **wrap_kwargs)
@cached_method
def total_return(self, group_by=None, wrap_kwargs=None):
"""Get total profit per column/group."""
init_cash = to_1d(self.get_init_cash(group_by=group_by), raw=True)
total_profit = to_1d(self.total_profit(group_by=group_by), raw=True)
total_return = nb.total_return_nb(total_profit, init_cash)
wrap_kwargs = merge_dicts(dict(name_or_index='total_return'), wrap_kwargs)
return self.wrapper.wrap_reduced(total_return, group_by=group_by, **wrap_kwargs)
@cached_method
def returns(self, group_by=None, in_sim_order=False, wrap_kwargs=None):
"""Get return series per column/group based on portfolio value."""
value = to_2d(self.value(group_by=group_by, in_sim_order=in_sim_order), raw=True)
if self.wrapper.grouper.is_grouping_disabled(group_by=group_by) and in_sim_order:
group_lens = self.wrapper.grouper.get_group_lens()
init_cash_grouped = to_1d(self.init_cash, raw=True)
call_seq = to_2d(self.call_seq, raw=True)
returns = nb.returns_in_sim_order_nb(value, group_lens, init_cash_grouped, call_seq)
else:
init_cash = to_1d(self.get_init_cash(group_by=group_by), raw=True)
returns = nb.returns_nb(value, init_cash)
return self.wrapper.wrap(returns, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def active_returns(self, group_by=None, wrap_kwargs=None):
"""Get active return series per column/group.
This type of returns is based solely on cash flows and holding value rather than portfolio value.
It ignores passive cash and thus it will return the same numbers irrespective of the amount of
cash currently available, even `np.inf`. The scale of returns is comparable to that of going
all in and keeping available cash at zero."""
cash_flow = to_2d(self.cash_flow(group_by=group_by), raw=True)
holding_value = to_2d(self.holding_value(group_by=group_by), raw=True)
active_returns = nb.active_returns_nb(cash_flow, holding_value)
return self.wrapper.wrap(active_returns, group_by=group_by, **merge_dicts({}, wrap_kwargs))
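# Illustrative sketch (not part of the original source): both return definitions
# can be compared on the same portfolio.
#
# >>> portfolio.returns()         # scaled by portfolio value, incl. idle cash
# >>> portfolio.active_returns()  # scaled by cash flow and holding value only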
@cached_method
def market_value(self, group_by=None, wrap_kwargs=None):
"""Get market (benchmark) value series per column/group.
If grouped, evenly distributes initial cash among assets in the group.
Uses filled close if `Portfolio.use_filled_close` is True.
!!! note
Does not take into account fees and slippage. For this, create a separate portfolio."""
if self.use_filled_close:
close = to_2d(self.fill_close(), raw=True)
else:
close = to_2d(self.close, raw=True)
if self.wrapper.grouper.is_grouped(group_by=group_by):
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
init_cash_grouped = to_1d(self.get_init_cash(group_by=group_by), raw=True)
market_value = nb.market_value_grouped_nb(close, group_lens, init_cash_grouped)
else:
init_cash = to_1d(self.get_init_cash(group_by=False), raw=True)
market_value = nb.market_value_nb(close, init_cash)
return self.wrapper.wrap(market_value, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def market_returns(self, group_by=None, wrap_kwargs=None):
"""Get return series per column/group based on market (benchmark) value."""
market_value = to_2d(self.market_value(group_by=group_by), raw=True)
init_cash = to_1d(self.get_init_cash(group_by=group_by), raw=True)
market_returns = nb.returns_nb(market_value, init_cash)
return self.wrapper.wrap(market_returns, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def total_market_return(self, group_by=None, wrap_kwargs=None):
"""Get total market (benchmark) return."""
market_value = to_2d(self.market_value(group_by=group_by), raw=True)
total_market_return = nb.total_market_return_nb(market_value)
wrap_kwargs = merge_dicts(dict(name_or_index='total_market_return'), wrap_kwargs)
return self.wrapper.wrap_reduced(total_market_return, group_by=group_by, **wrap_kwargs)
@cached_method
def stats(self, column=None, group_by=None, incl_unrealized=None, active_returns=False,
in_sim_order=False, agg_func=_mean_agg_func, wrap_kwargs=None, **kwargs):
"""Compute various statistics on this portfolio.
`kwargs` will be passed to each `vectorbt.returns.accessors.ReturnsAccessor` method.
Can either return aggregated statistics by reducing metrics of all columns with
`agg_func` (mean by default) or return statistics for a single column if `column`
was specified or portfolio contains only one column of data. To display rich data types
such as durations correctly, use an aggregation function that can be applied on `pd.Series`.
!!! note
Use `column` only if caching is enabled, otherwise it may re-compute the same
objects multiple times."""
if self.wrapper.freq is None:
raise ValueError("Couldn't parse the frequency of index. You must set `freq`.")
# Pre-calculate
trades = self.get_trades(group_by=group_by)
if incl_unrealized is None:
incl_unrealized = self.incl_unrealized
if not incl_unrealized:
trades = trades.closed
drawdowns = self.get_drawdowns(group_by=group_by)
if active_returns:
returns = self.active_returns(group_by=group_by)
else:
returns = self.returns(group_by=group_by, in_sim_order=in_sim_order)
# Run stats
stats_df = pd.DataFrame({
'Start': self.wrapper.index[0],
'End': self.wrapper.index[-1],
'Duration': self.wrapper.shape[0] * self.wrapper.freq,
'Init. Cash': self.get_init_cash(group_by=group_by),
'Total Profit': self.total_profit(group_by=group_by),
'Total Return [%]': self.total_return(group_by=group_by) * 100,
'Benchmark Return [%]': self.total_market_return(group_by=group_by) * 100,
'Position Coverage [%]': self.pos_coverage(group_by=group_by) * 100,
'Max. Drawdown [%]': -drawdowns.max_drawdown() * 100,
'Avg. Drawdown [%]': -drawdowns.avg_drawdown() * 100,
'Max. Drawdown Duration': drawdowns.max_duration(),
'Avg. Drawdown Duration': drawdowns.avg_duration(),
'Num. Trades': trades.count(),
'Win Rate [%]': trades.win_rate() * 100,
'Best Trade [%]': trades.returns.max() * 100,
'Worst Trade [%]': trades.returns.min() * 100,
'Avg. Trade [%]': trades.returns.mean() * 100,
'Max. Trade Duration': trades.duration.max(wrap_kwargs=dict(time_units=True)),
'Avg. Trade Duration': trades.duration.mean(wrap_kwargs=dict(time_units=True)),
'Expectancy': trades.expectancy(),
'SQN': trades.sqn(),
'Gross Exposure': self.gross_exposure(group_by=group_by).mean(),
'Sharpe Ratio': self.sharpe_ratio(reuse_returns=returns, **kwargs),
'Sortino Ratio': self.sortino_ratio(reuse_returns=returns, **kwargs),
'Calmar Ratio': self.calmar_ratio(reuse_returns=returns, **kwargs)
}, index=self.wrapper.grouper.get_columns(group_by=group_by))
# Select columns or reduce
if self.wrapper.get_ndim(group_by=group_by) == 1:
wrap_kwargs = merge_dicts(dict(name_or_index=stats_df.columns), wrap_kwargs)
return self.wrapper.wrap_reduced(stats_df.iloc[0], group_by=group_by, **wrap_kwargs)
if column is not None:
return stats_df.loc[column]
if agg_func is not None:
if agg_func == _mean_agg_func:
warnings.warn("Taking mean across columns. To return a DataFrame, pass agg_func=None.", stacklevel=2)
func_name = 'stats_mean'
else:
func_name = 'stats_' + agg_func.__name__
agg_stats_sr = pd.Series(index=stats_df.columns, name=func_name)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
import pickle

# Model and encoder instances
rfr = RandomForestRegressor()
rfr1 = RandomForestRegressor()
lab_encoder = LabelEncoder()

# Load the seaborn 'tips' dataset
df = sns.load_dataset('tips')

# Preprocessing: label-encode binary categorical features
bin_category = [feature for feature in df.columns
                if (str(df[feature].dtype) == 'category') and (len(df[feature].unique()) == 2)]
for feature in bin_category:
    df[feature] = lab_encoder.fit_transform(df[feature])

# One-hot encode the multi-class 'day' column and merge it back
day = pd.get_dummies(df['day'], drop_first=True)
df.drop('day', axis=1, inplace=True)
df1 = pd.concat([df, day], axis=1)
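# NOTE: the original script is truncated above. The following is an illustrative
# continuation only - the target column ('tip'), split parameters, and model file
# name are assumptions, not part of the source.
X = df1.drop('tip', axis=1)
y = df1['tip']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
rfr.fit(X_train, y_train)  # rfr was instantiated at the top of the script
print('R^2 on the held-out split:', rfr.score(X_test, y_test))
with open('rfr_model.pkl', 'wb') as f:  # persist the fitted model (hypothetical file name)
    pickle.dump(rfr, f)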