| prompt (string, length 19 to 1.03M) | completion (string, length 4 to 2.12k) | api (string, length 8 to 90) |
from pathlib import Path
import re
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas.compat import is_platform_windows
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
Series,
_testing as tm,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
from pandas.io.pytables import TableIterator
pytestmark = pytest.mark.single
def test_read_missing_key_close_store(setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with HDFStore(path, "r") as store:
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
read_hdf(store, "k1")
def test_read_column(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# GH 17912
# HDFStore.select_column should raise a KeyError
# exception if the key is not a valid store
with pytest.raises(KeyError, match="No object named df in the file"):
store.select_column("df", "index")
store.append("df", df)
# error
with pytest.raises(
KeyError, match=re.escape("'column [foo] not found in the table'")
):
store.select_column("df", "foo")
msg = re.escape("select_column() got an unexpected keyword argument 'where'")
with pytest.raises(TypeError, match=msg):
store.select_column("df", "index", where=["index>5"])
# valid
result = store.select_column("df", "index")
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
msg = re.escape(
"column [values_block_0] can not be extracted individually; "
"it is not data indexable"
)
with pytest.raises(ValueError, match=msg):
store.select_column("df", "values_block_0")
# a data column
df2 = df.copy()
df2["string"] = "foo"
store.append("df2", df2, data_columns=["string"])
result = store.select_column("df2", "string")
tm.assert_almost_equal(result.values, df2["string"].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3["string"] = "foo"
df3.loc[df3.index[4:6], "string"] = np.nan
store.append("df3", df3, data_columns=["string"])
result = store.select_column("df3", "string")
tm.assert_almost_equal(result.values, df3["string"].values)
# start/stop
result = store.select_column("df3", "string", start=2)
tm.assert_almost_equal(result.values, df3["string"].values[2:])
result = store.select_column("df3", "string", start=-2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:])
result = store.select_column("df3", "string", stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[:2])
result = store.select_column("df3", "string", stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[:-2])
result = store.select_column("df3", "string", start=2, stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[2:-2])
result = store.select_column("df3", "string", start=-2, stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({"A": np.random.randn(10), "B": "foo"})
store.append("df4", df4, data_columns=True)
expected = df4["B"]
result = store.select_column("df4", "B")
tm.assert_series_equal(result, expected)
def test_pytables_native_read(datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf/pytables_native.h5"), mode="r"
) as store:
d2 = store["detector/readout"]
assert isinstance(d2, DataFrame)
@pytest.mark.skipif(is_platform_windows(), reason="native2 read fails oddly on windows")
def test_pytables_native2_read(datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "pytables_native2.h5"), mode="r"
) as store:
str(store)
d1 = store["detector"]
assert isinstance(d1, DataFrame)
def test_legacy_table_fixed_format_read_py2(datapath, setup_path):
# GH 24510
# legacy table with fixed format written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_fixed_py2.h5"), mode="r"
) as store:
result = store.select("df")
expected = DataFrame(
[[1, 2, 3, "D"]],
columns=["A", "B", "C", "D"],
index=Index(["ABC"], name="INDEX_NAME"),
)
tm.assert_frame_equal(expected, result)
def test_legacy_table_fixed_format_read_datetime_py2(datapath, setup_path):
# GH 31750
# legacy table with fixed format and datetime64 column written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_fixed_datetime_py2.h5"),
mode="r",
) as store:
result = store.select("df")
expected = DataFrame(
[[Timestamp("2020-02-06T18:00")]],
columns=["A"],
index=Index(["date"]),
)
tm.assert_frame_equal(expected, result)
def test_legacy_table_read_py2(datapath, setup_path):
# issue: 24925
# legacy table written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_py2.h5"), mode="r"
) as store:
result = store.select("table")
expected = DataFrame({"a": ["a", "b"], "b": [2, 3]})
tm.assert_frame_equal(expected, result)
def test_read_hdf_open_store(setup_path):
# GH10330
# No check for non-string path_or_buf, and no test of open store
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
df.index.name = "letters"
df = df.set_index(keys="E", append=True)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
direct = read_hdf(path, "df")
store = HDFStore(path, mode="r")
indirect = read_hdf(store, "df")
tm.assert_frame_equal(direct, indirect)
assert store.is_open
store.close()
def test_read_hdf_iterator(setup_path):
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
df.index.name = "letters"
df = df.set_index(keys="E", append=True)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w", format="t")
direct = read_hdf(path, "df")
iterator = read_hdf(path, "df", iterator=True)
assert isinstance(iterator, TableIterator)
indirect = next(iterator.__iter__())
| tm.assert_frame_equal(direct, indirect) | pandas._testing.assert_frame_equal |
from datetime import datetime
import re
import unittest
import nose
from nose.tools import assert_equal
import numpy as np
from pandas.tslib import iNaT
from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp
from pandas import compat
from pandas.compat import range, long, lrange, lmap, u
from pandas.core.common import notnull, isnull
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.core.config as cf
_multiprocess_can_split_ = True
def test_mut_exclusive():
msg = "mutually exclusive arguments: '[ab]' and '[ab]'"
with tm.assertRaisesRegexp(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_is_sequence():
is_seq = com._is_sequence
assert(is_seq((1, 2)))
assert(is_seq([1, 2]))
assert(not is_seq("abcd"))
assert(not is_seq(u("abcd")))
assert(not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert(not is_seq(A()))
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(notnull(float_series), Series))
assert(isinstance(notnull(obj_series), Series))
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert not isnull(np.inf)
assert not isnull(-np.inf)
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(isnull(float_series), Series))
assert(isinstance(isnull(obj_series), Series))
# call on DataFrame
df = DataFrame(np.random.randn(10, 5))
df['foo'] = 'bar'
result = isnull(df)
expected = result.apply(isnull)
tm.assert_frame_equal(result, expected)
def test_isnull_tuples():
result = isnull((1, 2))
exp = np.array([False, False])
assert(np.array_equal(result, exp))
result = isnull([(False,)])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([(1,), (2,)])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(('foo', 'bar'))
assert(not result.any())
result = isnull((u('foo'), u('bar')))
assert(not result.any())
def test_isnull_lists():
result = isnull([[False]])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(['foo', 'bar'])
assert(not result.any())
result = isnull([u('foo'), u('bar')])
assert(not result.any())
def test_isnull_datetime():
assert (not isnull(datetime.now()))
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
assert(notnull(idx).all())
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert(mask[0])
assert(not mask[1:].any())
def test_datetimeindex_from_empty_datetime64_array():
for unit in [ 'ms', 'us', 'ns' ]:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
assert(len(idx) == 0)
def test_nan_to_nat_conversions():
df = DataFrame(dict({
'A' : np.asarray(lrange(10),dtype='float64'),
'B' : Timestamp('20010101') }))
df.iloc[3:6,:] = np.nan
result = df.loc[4,'B'].value
assert(result == iNaT)
s = df['B'].copy()
s._data = s._data.setitem(tuple([slice(8,9)]),np.nan)
assert(isnull(s[8]))
# numpy < 1.7.0 is wrong
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.7.0':
assert(s[8].value == np.datetime64('NaT').astype(np.int64))
def test_any_none():
assert(com._any_none(1, 2, 3, None))
assert(not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert(com._all_not_none(1, 2, 3, 4))
assert(not com._all_not_none(1, 2, 3, None))
assert(not com._all_not_none(None, None, None, None))
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.get_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = com.pprint_thing(b, quote_strings=True)
assert_equal(res, repr(b))
res = com.pprint_thing(b, quote_strings=False)
assert_equal(res, b)
def test_rands():
r = com.rands(10)
assert(len(r) == 10)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = com.adjoin(2, *data)
assert(adjoined == expected)
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2),
(2, 3),
(3, 4)]
result = list(com.iterpairs(data))
assert(result == expected)
def test_split_ranges():
def _bin(x, width):
"return int(x) as a base2 string of given width"
return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))
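# Worked example (not in the original test): _bin(5, 4) == '0101'.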
def test_locs(mask):
nfalse = sum(np.array(mask) == 0)
remaining = 0
for s, e in com.split_ranges(mask):
remaining += e - s
assert 0 not in mask[s:e]
# make sure the total items covered by the ranges are a complete cover
assert remaining + nfalse == len(mask)
# exhaustively test all possible mask sequences of length 8
ncols = 8
for i in range(2 ** ncols):
cols = lmap(int, list(_bin(i, ncols))) # count up in base2
mask = [cols[i] == 1 for i in range(len(cols))]
test_locs(mask)
# base cases
test_locs([])
test_locs([0])
test_locs([1])
def test_indent():
s = 'a b c\nd e f'
result = com.indent(s, spaces=6)
assert(result == ' a b c\n d e f')
def test_banner():
ban = com.banner('hi')
assert(ban == ('%s\nhi\n%s' % ('=' * 80, '=' * 80)))
def test_map_indices_py():
data = [4, 3, 2, 1]
expected = {4: 0, 3: 1, 2: 2, 1: 3}
result = com.map_indices_py(data)
assert(result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(com.union(a, b))
assert((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.difference(b, a))
assert([4, 5, 6] == inter)
def test_intersection():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.intersection(a, b))
assert(a == inter)
def test_groupby():
values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3']
expected = {'f': ['foo', 'foo3'],
'b': ['bar', 'baz', 'baz2'],
'q': ['qux']}
grouped = com.groupby(values, lambda x: x[0])
for k, v in grouped:
assert v == expected[k]
def test_is_list_like():
passes = ([], [1], (1,), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),
Series([]), Series(['a']).str)
fails = (1, '2', object())
for p in passes:
assert com.is_list_like(p)
for f in fails:
assert not com.is_list_like(f)
def test_ensure_int32():
values = np.arange(10, dtype=np.int32)
result = com._ensure_int32(values)
assert(result.dtype == np.int32)
values = np.arange(10, dtype=np.int64)
result = com._ensure_int32(values)
assert(result.dtype == np.int32)
def test_ensure_platform_int():
# verify that when we create certain types of indices
# they remain the correct type under platform conversions
from pandas.core.index import Int64Index
# int64
x = Int64Index([1, 2, 3], dtype='int64')
assert(x.dtype == np.int64)
pi = com._ensure_platform_int(x)
assert(pi.dtype == np.int_)
# int32
x = Int64Index([1, 2, 3], dtype='int32')
assert(x.dtype == np.int32)
pi = com._ensure_platform_int(x)
assert(pi.dtype == np.int_)
# TODO: fix this broken test
# def test_console_encode():
# """
# On Python 2, if sys.stdin.encoding is None (IPython with zmq frontend)
# common.console_encode should encode things as utf-8.
# """
# if compat.PY3:
# raise nose.SkipTest
# with tm.stdin_encoding(encoding=None):
# result = com.console_encode(u"\u05d0")
# expected = u"\u05d0".encode('utf-8')
# assert (result == expected)
def test_is_re():
passes = re.compile('ad'),
fails = 'x', 2, 3, object()
for p in passes:
assert com.is_re(p)
for f in fails:
assert not com.is_re(f)
def test_is_recompilable():
passes = (r'a', u('x'), r'asdf', re.compile('adsf'),
| u(r'\u2233\s*') | pandas.compat.u |
from datetime import timedelta
import numpy as np
from pandas.core.groupby import BinGrouper, Grouper
from pandas.tseries.frequencies import to_offset, is_subperiod, is_superperiod
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.offsets import DateOffset, Tick, _delta_to_nanoseconds
from pandas.tseries.period import PeriodIndex, period_range
import pandas.tseries.tools as tools
import pandas.core.common as com
import pandas.compat as compat
from pandas.lib import Timestamp
import pandas.lib as lib
_DEFAULT_METHOD = 'mean'
class TimeGrouper(Grouper):
"""
Custom groupby class for time-interval grouping
Parameters
----------
freq : pandas date offset or offset alias for identifying bin edges
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
nperiods : optional, integer
convention : {'start', 'end', 'e', 's'}
If axis is PeriodIndex
Notes
-----
Use begin, end, nperiods to generate intervals that cannot be derived
directly from the associated object
"""
def __init__(self, freq='Min', closed=None, label=None, how='mean',
nperiods=None, axis=0,
fill_method=None, limit=None, loffset=None, kind=None,
convention=None, base=0, **kwargs):
freq = to_offset(freq)
end_types = set(['M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W'])
rule = freq.rule_code
if (rule in end_types or
('-' in rule and rule[:rule.find('-')] in end_types)):
if closed is None:
closed = 'right'
if label is None:
label = 'right'
else:
if closed is None:
closed = 'left'
if label is None:
label = 'left'
self.closed = closed
self.label = label
self.nperiods = nperiods
self.kind = kind
self.convention = convention or 'E'
self.convention = self.convention.lower()
self.loffset = loffset
self.how = how
self.fill_method = fill_method
self.limit = limit
self.base = base
# always sort time groupers
kwargs['sort'] = True
super(TimeGrouper, self).__init__(freq=freq, axis=axis, **kwargs)
def resample(self, obj):
self._set_grouper(obj, sort=True)
ax = self.grouper
if isinstance(ax, DatetimeIndex):
rs = self._resample_timestamps()
elif isinstance(ax, PeriodIndex):
offset = to_offset(self.freq)
if offset.n > 1:
if self.kind == 'period': # pragma: no cover
print('Warning: multiple of frequency -> timestamps')
# Cannot have multiple of periods, convert to timestamp
self.kind = 'timestamp'
if self.kind is None or self.kind == 'period':
rs = self._resample_periods()
else:
obj = self.obj.to_timestamp(how=self.convention)
self._set_grouper(obj)
rs = self._resample_timestamps()
elif len(ax) == 0:
return self.obj
else: # pragma: no cover
raise TypeError('Only valid with DatetimeIndex or PeriodIndex')
rs_axis = rs._get_axis(self.axis)
rs_axis.name = ax.name
return rs
def _get_grouper(self, obj):
self._set_grouper(obj)
return self._get_binner_for_resample()
def _get_binner_for_resample(self):
# create the BinGrouper
# assume that self.set_grouper(obj) has already been called
ax = self.ax
if self.kind is None or self.kind == 'timestamp':
self.binner, bins, binlabels = self._get_time_bins(ax)
else:
self.binner, bins, binlabels = self._get_time_period_bins(ax)
self.grouper = BinGrouper(bins, binlabels)
return self.binner, self.grouper, self.obj
def _get_binner_for_grouping(self, obj):
# return an ordering of the transformed group labels,
# suitable for multi-grouping, e.g the labels for
# the resampled intervals
ax = self._set_grouper(obj)
self._get_binner_for_resample()
# create the grouper
binner = self.binner
l = []
for key, group in self.grouper.get_iterator(ax):
l.extend([key]*len(group))
grouper = binner.__class__(l,freq=binner.freq,name=binner.name)
# since we may have had to sort
# may need to reorder groups here
if self.indexer is not None:
indexer = self.indexer.argsort(kind='quicksort')
grouper = grouper.take(indexer)
return grouper
def _get_time_bins(self, ax):
if not isinstance(ax, DatetimeIndex):
raise TypeError('axis must be a DatetimeIndex, but got '
'an instance of %r' % type(ax).__name__)
if len(ax) == 0:
binner = labels = DatetimeIndex(data=[], freq=self.freq, name=ax.name)
return binner, [], labels
first, last = _get_range_edges(ax, self.freq, closed=self.closed,
base=self.base)
tz = ax.tz
binner = labels = DatetimeIndex(freq=self.freq,
start=first.replace(tzinfo=None),
end=last.replace(tzinfo=None),
tz=tz,
name=ax.name)
# a little hack
trimmed = False
if (len(binner) > 2 and binner[-2] == ax.max() and
self.closed == 'right'):
binner = binner[:-1]
trimmed = True
ax_values = ax.asi8
binner, bin_edges = self._adjust_bin_edges(binner, ax_values)
# general version, knowing nothing about relative frequencies
bins = lib.generate_bins_dt64(ax_values, bin_edges, self.closed)
if self.closed == 'right':
labels = binner
if self.label == 'right':
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
else:
if self.label == 'right':
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
# if we end up with more labels than bins
# adjust the labels
# GH4076
if len(bins) < len(labels):
labels = labels[:len(bins)]
return binner, bins, labels
def _adjust_bin_edges(self, binner, ax_values):
# Some hacks for > daily data, see #1471, #1458, #1483
bin_edges = binner.asi8
if self.freq != 'D' and is_superperiod(self.freq, 'D'):
day_nanos = _delta_to_nanoseconds(timedelta(1))
if self.closed == 'right':
bin_edges = bin_edges + day_nanos - 1
# intraday values on last day
if bin_edges[-2] > ax_values.max():
bin_edges = bin_edges[:-1]
binner = binner[:-1]
return binner, bin_edges
def _get_time_period_bins(self, ax):
if not isinstance(ax, DatetimeIndex):
raise TypeError('axis must be a DatetimeIndex, but got '
'an instance of %r' % type(ax).__name__)
if not len(ax):
binner = labels = PeriodIndex(data=[], freq=self.freq, name=ax.name)
return binner, [], labels
labels = binner = PeriodIndex(start=ax[0],
end=ax[-1],
freq=self.freq,
name=ax.name)
end_stamps = (labels + 1).asfreq(self.freq, 's').to_timestamp()
if ax.tzinfo:
end_stamps = end_stamps.tz_localize(ax.tzinfo)
bins = ax.searchsorted(end_stamps, side='left')
return binner, bins, labels
@property
def _agg_method(self):
return self.how if self.how else _DEFAULT_METHOD
def _resample_timestamps(self):
# assumes set_grouper(obj) already called
axlabels = self.ax
self._get_binner_for_resample()
grouper = self.grouper
binner = self.binner
obj = self.obj
# Determine if we're downsampling
if axlabels.freq is not None or axlabels.inferred_freq is not None:
if len(grouper.binlabels) < len(axlabels) or self.how is not None:
# downsample
grouped = obj.groupby(grouper, axis=self.axis)
result = grouped.aggregate(self._agg_method)
# GH2073
if self.fill_method is not None:
result = result.fillna(method=self.fill_method,
limit=self.limit)
else:
# upsampling shortcut
if self.axis:
raise AssertionError('axis must be 0')
if self.closed == 'right':
res_index = binner[1:]
else:
res_index = binner[:-1]
# if we have the same frequency as our axis, then we are equal sampling
# even if how is None
if self.fill_method is None and self.limit is None and to_offset(
axlabels.inferred_freq) == self.freq:
result = obj.copy()
result.index = res_index
else:
result = obj.reindex(res_index, method=self.fill_method,
limit=self.limit)
else:
# Irregular data, have to use groupby
grouped = obj.groupby(grouper, axis=self.axis)
result = grouped.aggregate(self._agg_method)
if self.fill_method is not None:
result = result.fillna(method=self.fill_method,
limit=self.limit)
loffset = self.loffset
if isinstance(loffset, compat.string_types):
loffset = to_offset(self.loffset)
if isinstance(loffset, (DateOffset, timedelta)):
if (isinstance(result.index, DatetimeIndex)
and len(result.index) > 0):
result.index = result.index + loffset
return result
def _resample_periods(self):
# assumes set_grouper(obj) already called
axlabels = self.ax
obj = self.obj
if len(axlabels) == 0:
new_index = PeriodIndex(data=[], freq=self.freq)
return obj.reindex(new_index)
else:
start = axlabels[0].asfreq(self.freq, how=self.convention)
end = axlabels[-1].asfreq(self.freq, how='end')
new_index = period_range(start, end, freq=self.freq)
# Start vs. end of period
memb = axlabels.asfreq(self.freq, how=self.convention)
if is_subperiod(axlabels.freq, self.freq) or self.how is not None:
# Downsampling
rng = np.arange(memb.values[0], memb.values[-1] + 1)
bins = memb.searchsorted(rng, side='right')
grouper = BinGrouper(bins, new_index)
grouped = obj.groupby(grouper, axis=self.axis)
return grouped.aggregate(self._agg_method)
elif is_superperiod(axlabels.freq, self.freq):
# Get the fill indexer
indexer = memb.get_indexer(new_index, method=self.fill_method,
limit=self.limit)
return _take_new_index(obj, indexer, new_index, axis=self.axis)
else:
raise ValueError('Frequency %s cannot be resampled to %s'
% (axlabels.freq, self.freq))
def _take_new_index(obj, indexer, new_index, axis=0):
from pandas.core.api import Series, DataFrame
if isinstance(obj, Series):
new_values = | com.take_1d(obj.values, indexer) | pandas.core.common.take_1d |
import yfinance as yf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime
from datetime import timedelta, timezone
from risky_smart_weights import generate_risky_portfolio
def last_trading_day():
now = datetime.datetime.now(timezone(timedelta(hours=-5), 'EST'))
# US Markets close at 4pm, but afterhours trading ends at 8pm.
# yFinance stubbornly only gives the day's data after 8pm, so we will wait until 9pm to pull data from
# the current day.
market_close = now.replace(hour=21, minute=0, second=0, microsecond=0)
if now < market_close:
DELTA = 1
# If it is saturday or sunday
elif now.weekday() >= 5:
DELTA = 1
else:
DELTA = 0
start_date = (datetime.datetime.now() - timedelta(days=15)).strftime("%Y-%m-%d")
end_date = (datetime.datetime.now() - | pd.tseries.offsets.BDay(DELTA) | pandas.tseries.offsets.BDay |
import logging
import pandas as pd
import requests
import io
import re
import datetime
def create_elo_dict(db):
elo_dict = pd.read_csv('data/raw/elo_dictionary.csv', sep=';')[['fd.name', 'elo.name']]
elo_dict = elo_dict.rename(columns={'fd.name':'fd_name', 'elo.name':'elo_name'})
elo_dict['updated_untill'] = pd.to_datetime(None)
elo_dict.to_sql(name='elo_master', con=db, if_exists='replace', index=False)
db.execute("""update teams set elo_name = (
select `elo_name` from elo_master where teams.long_name = elo_master.`fd_name`)""")
def create_elo_scores(db):
logger = logging.getLogger(__name__)
teams = pd.read_sql("""select team_id, elo_name from teams""", db)
for idx, team in teams.iterrows():
try:
url = "http://api.clubelo.com/"+re.sub(' ', '', team.elo_name)
logger.info(url.strip())
s = requests.get(url).content
eloRank = pd.read_csv(io.StringIO(s.decode('utf-8')))
eloRank.From = pd.to_datetime(eloRank.From).dt.strftime('%Y-%m-%d')
eloRank.To = pd.to_datetime(eloRank.To).dt.strftime('%Y-%m-%d')
eloRank['team_id'] = team.team_id
now = datetime.datetime.now().strftime('%Y-%m-%d')
try:
last = pd.read_sql("""SELECT updated_untill FROM elo_master
WHERE elo_name = '{}' """.format(team.elo_name), db).updated_untill.values[0]
if last is None:
last = '2009-01-01'
except:
last = '2009-01-01'
logger.info('Last valid: {}'.format(last))
eloRank = eloRank.loc[(eloRank.From >= last)&(eloRank.To <= now)]
eloRank[['team_id', 'Club', 'Elo', 'From', 'To']].to_sql(name='temp_elo', con=db, if_exists='replace', index=False)
insert_elo_sql = ("""INSERT INTO elo_scores
SELECT * from temp_elo""")
db.execute(insert_elo_sql)
update_elo_master = ("""UPDATE elo_master SET updated_untill = '{to}'
WHERE elo_name = '{elo_name}'""".format(to=now, elo_name=team.elo_name))
db.execute(update_elo_master)
except Exception as e:
logger.error('Failed: ' + str(e))
logger.error('on {}'.format(team.elo_name))
def update_elo_scores(db):
logger = logging.getLogger(__name__)
teams = pd.read_sql("""select team_id, elo_name from teams
where team_id in (select HomeTeam FROM matches WHERE FTR IS NULL)
or team_id in (select AwayTeam FROM matches WHERE FTR IS NULL)""", db)
dates = pd.read_sql("""select Date FROM matches WHERE FTR IS NULL""", db)
for date in dates.Date.unique():
try:
url = "http://api.clubelo.com/"+str(date)
logger.info(url.strip())
s = requests.get(url).content
eloRank = pd.read_csv(io.StringIO(s.decode('utf-8')))
eloRank.From = pd.to_datetime(eloRank.From).dt.strftime('%Y-%m-%d')
eloRank.To = pd.to_datetime(eloRank.To).dt.strftime('%Y-%m-%d')
eloRank = | pd.merge(eloRank, teams, how='inner', right_on='elo_name', left_on='Club') | pandas.merge |
import Distances as d
import pandas as pd
import numpy as np
class CBSmot:
nano_to_seconds = 1000000000
def count_neighbors(self, traj, position, max_dist):
neighbors = 0
yet = True
j = position + 1
while j < len(traj.index) and yet:
if d.Distances.calculate_two_point_distance(traj.iloc[position]['lat'],
traj.iloc[position]['lon'],
traj.iloc[j]['lat'],
traj.iloc[j]['lon']) < max_dist:
neighbors += 1
else:
yet = False
j += 1
return neighbors
def centroid(self, subtraj):
x = 0
y = 0
for index, row in subtraj.iterrows():
x += row['lat']
y += row['lon']
return [x/len(subtraj.index), y/len(subtraj.index)]
def clean_stops(self, stops, min_time):
stops_aux = stops.copy()
for stop in stops:
p1 = stop.index.values[0]
p2 = stop.index.values[-1]
if (p2 - p1).item() / CBSmot.nano_to_seconds < min_time:
stops_aux.remove(stop)
return stops_aux
def clean_stops_segment(self, stops, min_time, index):
stops_aux = stops.copy()
i = 0
curr_idx=0
for stop in stops:
p1 = stop.index.values[0]
p2 = stop.index.values[-1]
if (p2 - p1).item() / CBSmot.nano_to_seconds < min_time:
stops_aux.pop(i)
index.pop(i)
else:
i += 1
return index, stops_aux
def merge_stop(self, stops, max_dist, time_tolerance):
i = 0
while i < len(stops):
if (i+1) < len(stops):
s1 = stops[i]
s2 = stops[i+1]
p2 = s2.index.values[0]
p1 = s1.index.values[-1]
if (p2 - p1).item() / CBSmot.nano_to_seconds <= time_tolerance:
c1 = self.centroid(s1)
c2 = self.centroid(s2)
if d.Distances.calculate_two_point_distance(c1[0], c1[1], c2[0], c2[1]) <= max_dist:
stops.pop(i+1)
s1 = s1.append(s2, ignore_index=True)  # DataFrame.append returns a new frame; keep the result
stops[i] = s1
i -= 1
i += 1
return stops
def merge_stop_segment(self, stops, max_dist, time_tolerance, index):
i = 0
while i < len(stops):
if (i+1) < len(stops):
s1 = stops[i]
s2 = stops[i+1]
p2 = s2.index.values[0]
p1 = s1.index.values[-1]
if (p2 - p1).item() / CBSmot.nano_to_seconds <= time_tolerance:
c1 = self.centroid(s1)
c2 = self.centroid(s2)
if d.Distances.calculate_two_point_distance(c1[0], c1[1], c2[0], c2[1]) <= max_dist:
index_i = index[i]
index_i_1 = index[i+1]
stops.pop(i+1)
index.pop(i+1)
s1 = s1.append(s2, ignore_index=True)  # keep the returned frame (append is not in-place)
stops[i] = s1
index[i] = [index_i[0], index_i_1[-1]]
i -= 1
i += 1
return index, stops
def find_stops(self, traj, max_dist, min_time, time_tolerance, merge_tolerance):
neighborhood = [0]*len(traj.index)
stops = []
traj.sort_index(inplace=True)
j = 0
while j < len(traj.index):
valor = self.count_neighbors(traj, j, max_dist)
neighborhood[j] = valor
j += valor
j += 1
for i in range(len(neighborhood)):
if neighborhood[i] > 0:
p1 = pd.to_datetime(traj.iloc[i].name)
p2 = pd.to_datetime(traj.iloc[i + neighborhood[i]-1].name)
diff = (p2 - p1).total_seconds()
if diff >= time_tolerance:
stops.append(traj.loc[p1:p2])
stops = self.merge_stop(stops, max_dist, merge_tolerance)
stops = self.clean_stops(stops, min_time)
return stops
def segment_stops_moves(self, traj, max_dist, min_time, time_tolerance, merge_tolerance):
neighborhood = [0]*len(traj.index)
stops = []
index = []
traj.sort_index(inplace=True)
j = 0
while j < len(traj.index):
valor = self.count_neighbors(traj, j, max_dist)
neighborhood[j] = valor
j += valor
j += 1
#print(neighborhood)
for i in range(len(neighborhood)):
if neighborhood[i] > 0:
p1 = | pd.to_datetime(traj.iloc[i].name) | pandas.to_datetime |
from __future__ import print_function, division
import os, re, sys
import logging
from .config import on_rtd
if not on_rtd:
import numpy as np
import pandas as pd
from configobj import ConfigObj
from asciitree import LeftAligned, Traversal
from asciitree.drawing import BoxStyle, BOX_DOUBLE, BOX_BLANK
from collections import OrderedDict
from itertools import chain, count
try:
from itertools import imap, izip
except ImportError: # Python 3
imap = map
izip = zip
xrange = range
else:
class Traversal(object):
pass
class LeftAligned(object):
pass
from .isochrone import get_ichrone
from .utils import addmags, distance
class NodeTraversal(Traversal):
"""
Custom subclass to traverse tree for ascii printing
"""
def __init__(self, pars=None, **kwargs):
self.pars = pars
super(NodeTraversal,self).__init__(**kwargs)
def get_children(self, node):
return node.children
def get_root(self, node):
return node
def get_text(self, node):
text = node.label
if self.pars is not None:
if hasattr(node, 'model_mag'):
text += '; model={:.2f} ({})'.format(node.model_mag(self.pars),
node.lnlike(self.pars))
if type(node)==ModelNode:
root = node.get_root()
if hasattr(root, 'spectroscopy'):
if node.label in root.spectroscopy:
for k,v in root.spectroscopy[node.label].items():
text += ', {}={}'.format(k,v)
modval = node.evaluate(self.pars[node.label], k)
lnl = -0.5*(modval - v[0])**2/v[1]**2
text += '; model={} ({})'.format(modval, lnl)
if node.label in root.limits:
for k,v in root.limits[node.label].items():
text += ', {} limits={}'.format(k,v)
text += ': {}'.format(self.pars[node.label])
else:
if type(node)==ModelNode:
root = node.get_root()
if hasattr(root, 'spectroscopy'):
if node.label in root.spectroscopy:
for k,v in root.spectroscopy[node.label].items():
text += ', {}={}'.format(k,v)
if node.label in root.limits:
for k,v in root.limits[node.label].items():
text += ', {} limits={}'.format(k,v)
#root = node.get_root()
#if hasattr(root,'spectroscopy'):
# if node.label in root.spectroscopy:
# for k,v in root.spectroscopy[node.label].items():
# model = node.evaluate(self.pars[node.label], k)
# text += '\n {}={} (model={})'.format(k,v,model)
return text
class MyLeftAligned(LeftAligned):
"""For custom ascii tree printing
"""
pars = None
def __init__(self, pars=None, **kwargs):
self.pars = pars
self.traverse = NodeTraversal(pars)
super(MyLeftAligned,self).__init__(**kwargs)
class Node(object):
def __init__(self, label):
self.label = label
self.parent = None
self.children = []
self._leaves = None
def __iter__(self):
"""
Iterate through tree, leaves first
following http://stackoverflow.com/questions/6914803/python-iterator-through-tree-with-list-of-children
"""
for node in chain(*imap(iter, self.children)):
yield node
yield self
def __getitem__(self, ind):
for n,i in izip(self, count()):
if i==ind:
return n
@property
def is_root(self):
return self.parent is None
def get_root(self):
if self.is_root:
return self
else:
return self.parent.get_root()
def get_ancestors(self):
if self.parent.is_root:
return []
else:
return [self.parent] + self.parent.get_ancestors()
def print_ascii(self, fout=None, pars=None):
box_tr = MyLeftAligned(pars,draw=BoxStyle(gfx=BOX_DOUBLE, horiz_len=1))
if fout is None:
print(box_tr(self))
else:
fout.write(box_tr(self))
@property
def is_leaf(self):
return len(self.children)==0 and not self.is_root
def _clear_leaves(self):
self._leaves = None
def _clear_all_leaves(self):
if not self.is_root:
self.parent._clear_all_leaves()
self._clear_leaves()
def add_child(self, node):
node.parent = self
self.children.append(node)
self._clear_all_leaves()
def remove_children(self):
self.children = []
self._clear_all_leaves()
def remove_child(self, label):
"""
Removes node by label
"""
ind = None
for i,c in enumerate(self.children):
if c.label==label:
ind = i
if ind is None:
logging.warning('No child labeled {}.'.format(label))
return
self.children.pop(ind)
self._clear_all_leaves()
def attach_to_parent(self, node):
# detach from current parent, if necessary
if self.parent is not None:
self.parent.remove_child(self.label)
node.children += [self]
self.parent = node
self._clear_all_leaves()
@property
def leaves(self):
if self._leaves is None:
self._leaves = self._get_leaves()
return self._leaves
def _get_leaves(self):
if self.is_leaf:
return [self]
else:
leaves = []
for c in self.children:
leaves += c._get_leaves()
return leaves
def select_leaves(self, name):
"""Returns all leaves under all nodes matching name
"""
if self.is_leaf:
return [self] if re.search(name, self.label) else []
else:
leaves = []
if re.search(name, self.label):
for c in self.children:
leaves += c._get_leaves() #all leaves
else:
for c in self.children:
leaves += c.select_leaves(name) #only matching ones
return leaves
@property
def leaf_labels(self):
return [l.label for l in self.leaves]
def get_leaf(self, label):
for l in self.leaves:
if label==l.label:
return l
def get_obs_nodes(self):
return [l for l in self if isinstance(l, ObsNode)]
@property
def obs_leaf_nodes(self):
return self.get_obs_leaves()
def get_obs_leaves(self):
"""Returns the last obs nodes that are leaves
"""
obs_leaves = []
for n in self:
if n.is_leaf:
if isinstance(n, ModelNode):
l = n.parent
else:
l = n
if l not in obs_leaves:
obs_leaves.append(l)
return obs_leaves
def get_model_nodes(self):
return [l for l in self._get_leaves() if isinstance(l, ModelNode)]
@property
def N_model_nodes(self):
return len(self.get_model_nodes())
def print_tree(self):
print(self.label)
def __str__(self):
return self.label
def __repr__(self):
if self.is_leaf:
s = "<{} '{}', parent='{}'>".format(self.__class__,
self.label,
self.parent)
else:
child_labels = [str(c) for c in self.children]
s = "<{} '{}', parent='{}', children={}>".format(self.__class__,
self.label,
self.parent,
child_labels)
return s
class ObsNode(Node):
def __init__(self, observation, source, ref_node=None):
self.observation = observation
self.source = source
self.reference = ref_node
self.children = []
self.parent = None
self._leaves = None
#indices of underlying models, defining physical systems
self._inds = None
self._n_params = None
self._Nstars = None
#for model_mag caching
self._cache_key = None
self._cache_val = None
@property
def instrument(self):
return self.observation.name
@property
def band(self):
return self.observation.band
@property
def value(self):
return (self.source.mag, self.source.e_mag)
@property
def resolution(self):
return self.observation.resolution
@property
def relative(self):
return self.source.relative
@property
def separation(self):
return self.source.separation
@property
def pa(self):
return self.source.pa
@property
def value_str(self):
return '({:.2f}, {:.2f})'.format(*self.value)
def distance(self, other):
"""Coordinate distance from another ObsNode
"""
return distance((self.separation, self.pa), (other.separation, other.pa))
def _in_same_observation(self, other):
return self.instrument==other.instrument and self.band==other.band
@property
def n_params(self):
if self._n_params is None:
self._n_params = 5 * len(self.leaves)
return self._n_params
def _get_inds(self):
inds = [n.index for n in self.leaves]
inds = sorted(list(set(inds)))
return inds
def _clear_leaves(self):
self._leaves = None
self._inds = None
self._n_params = None
self._Nstars = None
@property
def Nstars(self):
"""
dictionary of number of stars per system
"""
if self._Nstars is None:
N = {}
for n in self.get_model_nodes():
if n.index not in N:
N[n.index] = 1
else:
N[n.index] += 1
self._Nstars = N
return self._Nstars
@property
def systems(self):
lst = sorted(self.Nstars.keys())
return lst
@property
def inds(self):
if self._inds is None:
self._inds = self._get_inds()
return self._inds
@property
def label(self):
if self.source.relative:
band_str = 'delta-{}'.format(self.band)
else:
band_str = self.band
return '{} {}={} @({:.2f}, {:.0f} [{:.2f}])'.format(self.instrument,
band_str,
self.value_str, self.separation, self.pa,
self.resolution)
@property
def obsname(self):
return '{}-{}'.format(self.instrument, self.band)
def get_system(self, ind):
system = []
for l in self.get_root().leaves:
try:
if l.index==ind:
system.append(l)
except AttributeError:
pass
return system
def add_model(self, ic, N=1, index=0):
"""
Should only be able to do this to a leaf node.
Either N and index both integers OR index is
list of length=N
"""
if type(index) in [list,tuple]:
if len(index) != N:
raise ValueError('If a list, index must be of length N.')
else:
index = [index]*N
for idx in index:
existing = self.get_system(idx)
tag = len(existing)
self.add_child(ModelNode(ic, index=idx, tag=tag))
def model_mag(self, pardict, use_cache=True):
"""
pardict is a dictionary of parameters for all leaves
gets converted back to traditional parameter vector
"""
if pardict == self._cache_key and use_cache:
#print('{}: using cached'.format(self))
return self._cache_val
#print('{}: calculating'.format(self))
self._cache_key = pardict
# Generate appropriate parameter vector from dictionary
p = []
for l in self.leaf_labels:
p.extend(pardict[l])
assert len(p) == self.n_params
tot = np.inf
#print('Building {} mag for {}:'.format(self.band, self))
for i,m in enumerate(self.leaves):
mag = m.evaluate(p[i*5:(i+1)*5], self.band)
# logging.debug('{}: mag={}'.format(self,mag))
#print('{}: {}({}) = {}'.format(m,self.band,p[i*5:(i+1)*5],mag))
tot = addmags(tot, mag)
self._cache_val = tot
return tot
def lnlike(self, pardict, use_cache=True):
"""
returns log-likelihood of this observation
pardict is a dictionary of parameters for all leaves
gets converted back to traditional parameter vector
"""
mag, dmag = self.value
if np.isnan(dmag):
return 0
if self.relative:
# If this *is* the reference, just return
if self.reference is None:
return 0
mod = (self.model_mag(pardict, use_cache=use_cache) -
self.reference.model_mag(pardict, use_cache=use_cache))
mag -= self.reference.value[0]
else:
mod = self.model_mag(pardict, use_cache=use_cache)
lnl = -0.5*(mag - mod)**2 / dmag**2
# logging.debug('{} {}: mag={}, mod={}, lnlike={}'.format(self.instrument,
# self.band,
# mag,mod,lnl))
return lnl
class DummyObsNode(ObsNode):
def __init__(self, *args, **kwargs):
self.observation = None
self.source = None
self.reference = None
self.children = []
self.parent = None
self._leaves = None
#indices of underlying models, defining physical systems
self._inds = None
self._n_params = None
self._Nstars = None
#for model_mag caching
self._cache_key = None
self._cache_val = None
@property
def label(self):
return '[dummy]'
@property
def value(self):
return None, None
def lnlike(self, *args, **kwargs):
return 0
class ModelNode(Node):
"""
These are always leaves; leaves are always these.
Index keeps track of which physical system node is in.
"""
def __init__(self, ic, index=0, tag=0):
self._ic = ic
self.index = index
self.tag = tag
self.children = []
self.parent = None
self._leaves = None
@property
def label(self):
return '{}_{}'.format(self.index, self.tag)
@property
def ic(self):
if type(self._ic)==type:
self._ic = self._ic()
return self._ic
def get_obs_ancestors(self):
nodes = self.get_ancestors()
return [n for n in nodes if isinstance(n, ObsNode)]
@property
def contributing_observations(self):
"""The instrument-band for all the observations feeding into this model node
"""
return [n.obsname for n in self.get_obs_ancestors()]
def evaluate(self, p, prop):
if prop in self.ic.bands:
return self.evaluate_mag(p, prop)
elif prop=='mass':
return p[0]
elif prop=='age':
return p[1]
elif prop=='feh':
return p[2]
elif prop in ['Teff','logg','radius']:
return getattr(self.ic, prop)(*p[:3])
else:
raise ValueError('property {} cannot be evaluated by Isochrone.'.format(prop))
def evaluate_mag(self, p, band):
return self.ic.mag[band](*p)
def lnlike(self, *args, **kwargs):
return 0
class Source(object):
def __init__(self, mag, e_mag, separation=0., pa=0.,
relative=False, is_reference=False):
self.mag = float(mag)
self.e_mag = float(e_mag)
self.separation = float(separation)
self.pa = float(pa)
self.relative = bool(relative)
self.is_reference = bool(is_reference)
def __str__(self):
return '({}, {}) @({}, {})'.format(self.mag, self.e_mag,
self.separation, self.pa)
def __repr__(self):
return self.__str__()
class Star(object):
"""Theoretical counterpart of Source.
"""
def __init__(self, pars, separation, pa):
self.pars = pars
self.separation = separation
self.pa = pa
def distance(self, other):
return distance((self.separation, self.pa),
(other.separation, other.pa))
class Observation(object):
"""
Contains relevant information about imaging observation
name: identifying string (typically the instrument)
band: photometric bandpass
resolution: *approximate* angular resolution of instrument.
used for source matching between observations
sources: list of Source objects
"""
def __init__(self, name, band, resolution, sources=None,
relative=False):
self.name = name
self.band = band
self.resolution = resolution
if sources is not None:
if not np.all(type(s)==Source for s in sources):
raise ValueError('Source list must be all Source objects.')
self.sources = []
if sources is None:
sources = []
for s in sources:
self.add_source(s)
self.relative = relative
self._set_reference()
def observe(self, stars, unc, ic=None):
"""Creates and adds appropriate synthetic Source objects for list of stars (max 2 for now)
"""
if ic is None:
ic = get_ichrone('mist')
if len(stars) > 2:
raise NotImplementedError('No support yet for > 2 synthetic stars')
mags = [ic(*s.pars)['{}_mag'.format(self.band)].values[0] for s in stars]
d = stars[0].distance(stars[1])
if d < self.resolution:
mag = addmags(*mags) + unc*np.random.randn()
sources = [Source(mag, unc, stars[0].separation, stars[0].pa,
relative=self.relative)]
else:
mags = np.array([m + unc*np.random.randn() for m in mags])
if self.relative:
mags -= mags.min()
sources = [Source(m, unc, s.separation, s.pa, relative=self.relative)
for m,s in zip(mags, stars)]
for s in sources:
self.add_source(s)
self._set_reference()
def add_source(self, source):
"""
Adds source to observation, keeping sorted order (in separation)
"""
if not type(source)==Source:
raise TypeError('Can only add Source object.')
if len(self.sources)==0:
self.sources.append(source)
else:
ind = 0
for s in self.sources:
# Keep sorted order of separation
if source.separation < s.separation:
break
ind += 1
self.sources.insert(ind, source)
#self._set_reference()
@property
def brightest(self):
mag0 = np.inf
s0 = None
for s in self.sources:
if s.mag < mag0:
mag0 = s.mag
s0 = s
return s0
def _set_reference(self):
"""If relative, make sure reference node is set to brightest.
"""
if len(self.sources) > 0:
self.brightest.is_reference = True
def __str__(self):
return '{}-{}'.format(self.name, self.band)
def __repr__(self):
return str(self)
class ObservationTree(Node):
"""Builds a tree of Nodes from a list of Observation objects
Organizes Observations from smallest to largest resolution,
and at each stage attaches each source to the most probable
match from the previous Observation. Admittedly somewhat hack-y,
but should *usually* do the right thing. Check out `obs.print_ascii()`
to visualize what this has done.
"""
spec_props = ['Teff', 'logg', 'feh']
def __init__(self, observations=None, name=None):
if observations is None:
observations = []
if name is None:
self.label = 'root'
else:
self.label = name
self.parent = None
self._observations = []
self._build_tree()
[self.add_observation(obs) for obs in observations]
self._N = None
self._index = None
# Spectroscopic properties
self.spectroscopy = {}
# Limits (such as minimum on logg)
self.limits = {}
# Parallax measurements
self.parallax = {}
# This will be calculated and set at first access
self._Nstars = None
#likelihood cache
self._cache_key = None
self._cache_val = None
@property
def name(self):
return self.label
def _clear_cache(self):
self._cache_key = None
self._cache_val = None
@classmethod
def from_df(cls, df, **kwargs):
"""
DataFrame must have the right columns.
these are: name, band, resolution, mag, e_mag, separation, pa
"""
tree = cls(**kwargs)
for (n,b), g in df.groupby(['name','band']):
#g.sort('separation', inplace=True) #ensures that the first is reference
sources = [Source(**s[['mag','e_mag','separation','pa','relative']])
for _,s in g.iterrows()]
obs = Observation(n, b, g.resolution.mean(),
sources=sources, relative=g.relative.any())
tree.add_observation(obs)
# For all relative mags, set reference to be brightest
return tree
@classmethod
def from_ini(cls, filename):
config = ConfigObj(filename)
def to_df(self):
"""
Returns DataFrame with photometry from observations organized.
This DataFrame should be able to be read back in to
reconstruct the observation.
"""
df = pd.DataFrame()
name = []
band = []
resolution = []
mag = []
e_mag = []
separation = []
pa = []
relative = []
for o in self._observations:
for s in o.sources:
name.append(o.name)
band.append(o.band)
resolution.append(o.resolution)
mag.append(s.mag)
e_mag.append(s.e_mag)
separation.append(s.separation)
pa.append(s.pa)
relative.append(s.relative)
return pd.DataFrame({'name':name,'band':band,'resolution':resolution,
'mag':mag,'e_mag':e_mag,'separation':separation,
'pa':pa,'relative':relative})
def save_hdf(self, filename, path='', overwrite=False, append=False):
"""
Writes all info necessary to recreate object to HDF file
Saves table of photometry in DataFrame
Saves model specification, spectroscopy, parallax to attrs
"""
if os.path.exists(filename):
store = | pd.HDFStore(filename) | pandas.HDFStore |
# -*- coding: utf-8 -*-
# medusa data file format: FZJ-EZ-2017
import datetime
import pandas as pd
import numpy as np
def _extract_md(mat, **kwargs):
md = mat['MD'].squeeze()
# Labview epoch
epoch = datetime.datetime(1904, 1, 1)
def convert_epoch(x):
timestamp = epoch + datetime.timedelta(seconds=x.astype(float))
return timestamp
dfl = []
# loop over frequencies
for f_id in range(0, md.size):
# print('Frequency: ', emd[f_id]['fm'])
fdata = md[f_id]
# for name in fdata.dtype.names:
# print(name, fdata[name].shape)
timestamp = np.atleast_2d(
[convert_epoch(x) for x in fdata['Time'].squeeze()]
).T
df = pd.DataFrame(
np.hstack((
timestamp,
fdata['cni'],
fdata['U0'][:, np.newaxis],
fdata['Cl3'],
fdata['Zg3'],
fdata['As3'][:, 0, :].squeeze(),
fdata['As3'][:, 1, :].squeeze(),
fdata['As3'][:, 2, :].squeeze(),
fdata['As3'][:, 3, :].squeeze(),
fdata['Is3'],
fdata['Yl3'],
fdata['Il3'],
))
)
df.columns = (
'datetime',
'a',
'b',
'U0',
'Cl1',
'Cl2',
'Cl3',
'Zg1',
'Zg2',
'Zg3',
'ShuntVoltage1_1',
'ShuntVoltage1_2',
'ShuntVoltage1_3',
'ShuntVoltage2_1',
'ShuntVoltage2_2',
'ShuntVoltage2_3',
'ShuntVoltage3_1',
'ShuntVoltage3_2',
'ShuntVoltage3_3',
'ShuntVoltage4_1',
'ShuntVoltage4_2',
'ShuntVoltage4_3',
'Is1',
'Is2',
'Is3',
'Yl1',
'Yl2',
'Yl3',
'Il1',
'Il2',
'Il3',
)
df['datetime'] = pd.to_datetime(df['datetime'])
df['a'] = df['a'].astype(int)
df['b'] = df['b'].astype(int)
df['Cl1'] = df['Cl1'].astype(complex)
df['Cl2'] = df['Cl2'].astype(complex)
df['Cl3'] = df['Cl3'].astype(complex)
df['Zg1'] = df['Zg1'].astype(complex)
df['Zg2'] = df['Zg2'].astype(complex)
df['Zg3'] = df['Zg3'].astype(complex)
df['Yl1'] = df['Yl1'].astype(complex)
df['Yl2'] = df['Yl2'].astype(complex)
df['Yl3'] = df['Yl3'].astype(complex)
for key in ('Il1', 'Il2', 'Il3'):
df[key] = df[key].astype(complex)
df['ShuntVoltage1_1'] = df['ShuntVoltage1_1'].astype(complex)
df['ShuntVoltage1_2'] = df['ShuntVoltage1_2'].astype(complex)
df['ShuntVoltage1_3'] = df['ShuntVoltage1_3'].astype(complex)
df['ShuntVoltage2_1'] = df['ShuntVoltage2_1'].astype(complex)
df['ShuntVoltage2_2'] = df['ShuntVoltage2_2'].astype(complex)
df['ShuntVoltage2_3'] = df['ShuntVoltage2_3'].astype(complex)
df['ShuntVoltage3_1'] = df['ShuntVoltage3_1'].astype(complex)
df['ShuntVoltage3_2'] = df['ShuntVoltage3_2'].astype(complex)
df['ShuntVoltage3_3'] = df['ShuntVoltage3_3'].astype(complex)
df['ShuntVoltage4_1'] = df['ShuntVoltage4_1'].astype(complex)
df['ShuntVoltage4_2'] = df['ShuntVoltage4_2'].astype(complex)
df['ShuntVoltage4_3'] = df['ShuntVoltage4_3'].astype(complex)
df['Is1'] = df['Is1'].astype(complex)
df['Is2'] = df['Is2'].astype(complex)
df['Is3'] = df['Is3'].astype(complex)
df['Is'] = np.mean(df[['Is1', 'Is2', 'Is3']].values, axis=1)
# "standard" injected current, in [mA]
df['Iab'] = np.abs(df['Is']) * 1e3
df['Iab'] = df['Iab'].astype(float)
df['Il'] = np.mean(df[['Il1', 'Il2', 'Il3']].values, axis=1)
# take absolute value and convert to mA
df['Ileakage'] = np.abs(df['Il']) * 1e3
df['Ileakage'] = df['Ileakage'].astype(float)
df['Zg'] = np.mean(df[['Zg1', 'Zg2', 'Zg3']], axis=1)
df['frequency'] = np.ones(df.shape[0]) * fdata['fm']
dfl.append(df)
df = pd.concat(dfl)
return df
def _extract_emd(mat, **kwargs):
"""Extract the data from the EMD substruct, given a medusa-created MNU0-mat
file
Parameters
----------
mat: matlab-imported struct
"""
emd = mat['EMD'].squeeze()
# Labview epoch
epoch = datetime.datetime(1904, 1, 1)
def convert_epoch(x):
timestamp = epoch + datetime.timedelta(seconds=x.astype(float))
return timestamp
dfl = []
# loop over frequencies
for f_id in range(0, emd.size):
# print('Frequency: ', emd[f_id]['fm'])
fdata = emd[f_id]
# some consistency checks
if len(fdata['nu']) == 2 and fdata['nu'].shape[1] == 2:
raise Exception('Need MNU0 file, not a quadpole .mat file:')
timestamp = np.atleast_2d(
[convert_epoch(x) for x in fdata['Time'].squeeze()]
).T
df = pd.DataFrame(
np.hstack((
timestamp,
fdata['ni'],
fdata['nu'][:, np.newaxis],
fdata['Zt3'],
fdata['Is3'],
fdata['Il3'],
fdata['Zg3'],
fdata['As3'][:, 0, :].squeeze(),
fdata['As3'][:, 1, :].squeeze(),
fdata['As3'][:, 2, :].squeeze(),
fdata['As3'][:, 3, :].squeeze(),
fdata['Yg13'],
fdata['Yg23'],
)),
)
df.columns = (
'datetime',
'a',
'b',
'p',
'Z1',
'Z2',
'Z3',
'Is1',
'Is2',
'Is3',
'Il1',
'Il2',
'Il3',
'Zg1',
'Zg2',
'Zg3',
'ShuntVoltage1_1',
'ShuntVoltage1_2',
'ShuntVoltage1_3',
'ShuntVoltage2_1',
'ShuntVoltage2_2',
'ShuntVoltage2_3',
'ShuntVoltage3_1',
'ShuntVoltage3_2',
'ShuntVoltage3_3',
'ShuntVoltage4_1',
'ShuntVoltage4_2',
'ShuntVoltage4_3',
'Yg13_1',
'Yg13_2',
'Yg13_3',
'Yg23_1',
'Yg23_2',
'Yg23_3',
)
df['frequency'] = np.ones(df.shape[0]) * fdata['fm']
# cast to correct type
df['datetime'] = | pd.to_datetime(df['datetime']) | pandas.to_datetime |
import os
import glob
import pandas as pd
# man, my original code kept failing because I was using proper practices like inplace=True, whereas
# the test kept wanting me to redeclare the variable.
# create a list of the location of all the game files that end in 'eve' from the games directory
game_files = glob.glob(os.path.join(os.getcwd(),'games','*.EVE'))
game_files.sort()
#game_frames list to hold the generated dataframes from the for in loop below
game_frames = []
for game_file in game_files:
game_frame = pd.read_csv(game_file,names=['type', 'multi2', 'multi3', 'multi4', 'multi5', 'multi6', 'event'])
game_frames.append(game_frame)
# this concats all the dataframes in the game_frames list into one dataframe
#games = pd.concat(game_frames, ignore_index=True)
games = pd.concat(game_frames)
games.loc[games['multi5'] == '??', ['multi5']] = ''
identifiers = games['multi2'].str.extract(r'(.LS(\d{4})\d{5})')
identifiers = identifiers.fillna(method='ffill')
identifiers.columns = ['game_id', 'year']
games = | pd.concat([games,identifiers],axis=1,sort=False) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 5 13:17:22 2021
@author: trduong
"""
# import os, sys;
# sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import pandas as pd
import numpy as np
import logging
import yaml
import pyro
import torch
import pyro.distributions as dist
import argparse
import sys
import pprint
import gc
from utils.evaluate_func import evaluate_pred, evaluate_distribution, evaluate_fairness
from utils.helpers import load_config
def evaluate_law(df, df_result, col):
sensitive_att = ['race', 'sex']
target = 'ZFYA'
for m in col:
print(m, sensitive_att)
performance_reg = evaluate_pred(df[m].values, df[target].values)
performance_fairness = evaluate_fairness(sensitive_att, df, m)
performance_reg.update(performance_fairness)
performance_reg['method'] = m
df_result = df_result.append(performance_reg, ignore_index=True)
return df_result
if __name__ == "__main__":
"""Load configuration"""
config_path = "/home/trduong/Data/counterfactual_fairness_game_theoric/configuration.yml"
conf = load_config(config_path)
"""Parsing argument"""
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, default=conf["ivr_law"])
parser.add_argument('--result_path', type=str, default=conf["evaluate_law"])
args = parser.parse_args()
data_path = args.data_path
result_path = args.result_path
"""Set up logging"""
logger = logging.getLogger('genetic')
file_handler = logging.FileHandler(filename=conf['evaluate_law_log'])
stdout_handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
file_handler.setFormatter(formatter)
stdout_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stdout_handler)
logger.setLevel(logging.DEBUG)
"""Load data"""
col = ["full_linear", "full_net",
"unaware_linear", "unaware_net",
"level2_lin_True", "level2_lin_False",
"level3_lin_True", "level3_lin_False",
"AL_prediction", "GL_prediction", "GD_prediction"]
df2 = pd.read_csv(data_path)
df1 = pd.read_csv(conf['law_baseline'])
df2 = df2.drop(columns = ['LSAT','UGPA','ZFYA', 'race','sex'])
df = pd.concat([df1, df2], axis=1)
df_result = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import re
from tqdm.notebook import tqdm
import random
import sklearn.metrics
from sklearn.pipeline import Pipeline
# For XGBoost Regression and Classification
import xgboost as xgb
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV, cross_val_score, KFold
from sklearn.metrics import mean_squared_error, f1_score, r2_score, mean_absolute_error
import catboost
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
import lightgbm
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import VotingRegressor
class ModelsParameters:
def __init__(self, dictionary_params):
self.dictionary_params = dictionary_params
""" I need this function for having all keys for the coming functions """
## Functions for creating a dictionary by simply inputting values of the params for
## each type of estimator
# Create dictionary with all params of RandomForest
def random_forest_params(
n_estimators=[100], # The number of trees in the forest.
criterion=['mse'], # {“mse”, “mae”}, default=”mse”. The function to measure the quality of a split.
max_depth=[None], # The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples
min_samples_split=[2], # The minimum number of samples required to split an internal node
min_samples_leaf=[1], # The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf=[0.0], # The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node
max_features=['auto'], # {“auto”, “sqrt”, “log2”}, int or float, default=”auto” The number of features to consider when looking for the best split. If auto, == n_features [if not so many]
max_leaf_nodes=[None], # pruning? [FIX]
min_impurity_decrease=[0.0],
min_impurity_split=[None],
bootstrap=[True],
oob_score=[False], # whether to use out-of-bag samples to estimate the R^2 on unseen data [should be true? FIX but then it will be less comparable]
n_jobs=[None],
random_state=[None],
verbose=[0],
warm_start=[False],
ccp_alpha=[0.0], # Complexity parameter used for Minimal Cost-Complexity Pruning [FIX]. The subtree with the largest cost complexity that is smaller than ccp_alpha will be chosen.
max_samples=[None], # bootstrap is True, the number of samples to draw from X to train each base estimator. If None, == to X.shape[0]
):
params_dict = {
'n_estimators': [n_estimators][0],
'criterion': [criterion][0],
'max_depth': [max_depth][0],
'min_samples_split': [min_samples_split][0],
'min_samples_leaf': [min_samples_leaf][0],
'min_weight_fraction_leaf': [min_weight_fraction_leaf][0],
'max_features': [max_features][0],
'max_leaf_nodes': [max_leaf_nodes][0],
'min_impurity_decrease': [min_impurity_decrease][0],
'min_impurity_split': [min_impurity_split][0],
'bootstrap': [bootstrap][0],
'oob_score': [oob_score][0],
'n_jobs': [n_jobs][0],
'random_state': [random_state][0],
'verbose': [verbose][0],
'warm_start': [warm_start][0],
'ccp_alpha': [ccp_alpha][0],
'max_samples': [max_samples][0],
}
return params_dict
def rf_params_pipeline(self, existing_prefix='', prefix_to_add='rf__'):
params_dict = {
prefix_to_add+'n_estimators': self.dictionary_params[existing_prefix+'n_estimators'],
prefix_to_add+'criterion': self.dictionary_params[existing_prefix+'criterion'],
prefix_to_add+'max_depth': self.dictionary_params[existing_prefix+'max_depth'],
prefix_to_add+'min_samples_split': self.dictionary_params[existing_prefix+'min_samples_split'],
prefix_to_add+'min_samples_leaf': self.dictionary_params[existing_prefix+'min_samples_leaf'],
prefix_to_add+'min_weight_fraction_leaf': self.dictionary_params[existing_prefix+'min_weight_fraction_leaf'],
prefix_to_add+'max_features': self.dictionary_params[existing_prefix+'max_features'],
prefix_to_add+'max_leaf_nodes': self.dictionary_params[existing_prefix+'max_leaf_nodes'],
prefix_to_add+'min_impurity_decrease': self.dictionary_params[existing_prefix+'min_impurity_decrease'],
prefix_to_add+'min_impurity_split': self.dictionary_params[existing_prefix+'min_impurity_split'],
prefix_to_add+'bootstrap': self.dictionary_params[existing_prefix+'bootstrap'],
prefix_to_add+'oob_score': self.dictionary_params[existing_prefix+'oob_score'],
prefix_to_add+'n_jobs': self.dictionary_params[existing_prefix+'n_jobs'],
prefix_to_add+'random_state': self.dictionary_params[existing_prefix+'random_state'],
prefix_to_add+'verbose': self.dictionary_params[existing_prefix+'verbose'],
prefix_to_add+'warm_start': self.dictionary_params[existing_prefix+'warm_start'],
prefix_to_add+'ccp_alpha': self.dictionary_params[existing_prefix+'ccp_alpha'],
prefix_to_add+'max_samples': self.dictionary_params[existing_prefix+'max_samples'],
}
return params_dict
def adaboost_params(
base_estimator=[None],
n_estimators=[50],
learning_rate=[1.0],
loss=['linear'],
random_state=[None]
):
params_dict = {
'base_estimator': [base_estimator][0],
'n_estimators': [n_estimators][0],
'learning_rate': [learning_rate][0],
'loss': [loss][0],
'random_state': [random_state][0]
}
return params_dict
def ab_params_pipeline(self, existing_prefix='', prefix_to_add='ab__'):
params_dict = {
prefix_to_add+'base_estimator': self.dictionary_params[existing_prefix+'base_estimator'],
prefix_to_add+'n_estimators': self.dictionary_params[existing_prefix+'n_estimators'],
prefix_to_add+'learning_rate': self.dictionary_params[existing_prefix+'learning_rate'],
prefix_to_add+'loss': self.dictionary_params[existing_prefix+'loss'],
prefix_to_add+'random_state': self.dictionary_params[existing_prefix+'random_state'],
}
return params_dict
def gradientboost_params(
loss=['ls'],
learning_rate=[0.1],
n_estimators=[100],
subsample=[1.0],
criterion=['friedman_mse'],
min_samples_split=[2],
min_samples_leaf=[1],
min_weight_fraction_leaf=[0.0],
max_depth=[3],
min_impurity_decrease=[0.0],
# min_impurity_split=[None], # deprecated FIX
init=[None],
random_state=[None],
max_features=[None],
alpha=[0.9],
verbose=[0],
max_leaf_nodes=[None],
warm_start=[False],
presort=['deprecated'],
validation_fraction=[0.1],
n_iter_no_change=[None],
tol=[0.0001],
ccp_alpha=[0.0],
):
params_dict = {
'loss': [loss][0],
'learning_rate': [learning_rate][0],
'n_estimators': [n_estimators][0],
'subsample': [subsample][0],
'criterion': [criterion][0],
'min_samples_split': [min_samples_split][0],
'min_samples_leaf': [min_samples_leaf][0],
'min_weight_fraction_leaf': [min_weight_fraction_leaf][0],
'max_depth': [max_depth][0],
'min_impurity_decrease': [min_impurity_decrease][0],
# 'min_impurity_split': [min_impurity_split][0],
'init': [init][0],
'random_state': [random_state][0],
'max_features': [max_features][0],
'alpha': [alpha][0],
'verbose': [verbose][0],
'max_leaf_nodes': [max_leaf_nodes][0],
'warm_start': [warm_start][0],
'presort': [presort][0],
'validation_fraction': [validation_fraction][0],
'n_iter_no_change': [n_iter_no_change][0],
'tol': [tol][0],
'ccp_alpha': [ccp_alpha][0],
}
return params_dict
def gb_params_pipeline(self, existing_prefix='', prefix_to_add='gb__'):
params_dict = {
prefix_to_add+'loss': self.dictionary_params[existing_prefix+'loss'],
prefix_to_add+'learning_rate': self.dictionary_params[existing_prefix+'learning_rate'],
prefix_to_add+'n_estimators': self.dictionary_params[existing_prefix+'n_estimators'],
prefix_to_add+'subsample': self.dictionary_params[existing_prefix+'subsample'],
prefix_to_add+'criterion': self.dictionary_params[existing_prefix+'criterion'],
prefix_to_add+'min_samples_split': self.dictionary_params[existing_prefix+'min_samples_split'],
prefix_to_add+'min_samples_leaf': self.dictionary_params[existing_prefix+'min_samples_leaf'],
prefix_to_add+'min_weight_fraction_leaf': self.dictionary_params[existing_prefix+'min_weight_fraction_leaf'],
prefix_to_add+'max_depth': self.dictionary_params[existing_prefix+'max_depth'],
prefix_to_add+'min_impurity_decrease': self.dictionary_params[existing_prefix+'min_impurity_decrease'],
# prefix_to_add+'min_impurity_split': self.dictionary_params[existing_prefix+'min_impurity_split'],
prefix_to_add+'init': self.dictionary_params[existing_prefix+'init'],
prefix_to_add+'random_state': self.dictionary_params[existing_prefix+'random_state'],
prefix_to_add+'max_features': self.dictionary_params[existing_prefix+'max_features'],
prefix_to_add+'alpha': self.dictionary_params[existing_prefix+'alpha'],
prefix_to_add+'verbose': self.dictionary_params[existing_prefix+'verbose'],
prefix_to_add+'max_leaf_nodes': self.dictionary_params[existing_prefix+'max_leaf_nodes'],
prefix_to_add+'warm_start': self.dictionary_params[existing_prefix+'warm_start'],
prefix_to_add+'presort': self.dictionary_params[existing_prefix+'presort'],
prefix_to_add+'validation_fraction': self.dictionary_params[existing_prefix+'validation_fraction'],
prefix_to_add+'n_iter_no_change': self.dictionary_params[existing_prefix+'n_iter_no_change'],
prefix_to_add+'tol': self.dictionary_params[existing_prefix+'tol'],
prefix_to_add+'ccp_alpha': self.dictionary_params[existing_prefix+'ccp_alpha'],
}
return params_dict
# XGBoost
def xgb_params(
objective=['reg:squarederror'],
n_estimators=[100],
max_depth=[10],
learning_rate=[0.3],
verbosity=[0],
booster=[None], # 'gbtree'
tree_method=['auto'],
n_jobs=[1],
gamma=[0],
min_child_weight=[None],
max_delta_step=[None],
subsample=[None],
colsample_bytree=[None],
colsample_bylevel=[None],
colsample_bynode=[None],
reg_alpha=[0],
reg_lambda=[0],
scale_pos_weight=[None],
base_score=[None],
random_state=[random.randint(0, 500)],
missing=[np.nan],
num_parallel_tree=[None],
monotone_constraints=[None],
interaction_constraints=[None],
importance_type=['gain']
):
params_dict = {
'objective': [objective][0],
'n_estimators': [n_estimators][0],
'max_depth': [max_depth][0],
'learning_rate': [learning_rate][0],
'verbosity': [verbosity][0],
'booster': [booster][0],
'tree_method': [tree_method][0],
'n_jobs': [n_jobs][0],
'gamma': [gamma][0],
'min_child_weight': [min_child_weight][0],
'max_delta_step': [max_delta_step][0],
'subsample': [subsample][0],
'colsample_bytree': [colsample_bytree][0],
'colsample_bylevel': [colsample_bylevel][0],
'colsample_bynode': [colsample_bynode][0],
'reg_alpha': [reg_alpha][0],
'reg_lambda': [reg_lambda][0],
'scale_pos_weight': [scale_pos_weight][0],
'base_score': [base_score][0],
'random_state': [random_state][0],
'missing': [missing][0],
'num_parallel_tree': [num_parallel_tree][0],
'monotone_constraints': [monotone_constraints][0],
'interaction_constraints': [interaction_constraints][0],
'importance_type': [importance_type][0]
}
return params_dict
def xgb_params_pipeline(self, existing_prefix='', prefix_to_add='xgb__'):
params_dict = {
prefix_to_add+'objective': self.dictionary_params[existing_prefix+'objective'],
prefix_to_add+'n_estimators': self.dictionary_params[existing_prefix+'n_estimators'],
prefix_to_add+'max_depth': self.dictionary_params[existing_prefix+'max_depth'],
prefix_to_add+'learning_rate': self.dictionary_params[existing_prefix+'learning_rate'],
prefix_to_add+'verbosity': self.dictionary_params[existing_prefix+'verbosity'],
prefix_to_add+'booster': self.dictionary_params[existing_prefix+'booster'],
prefix_to_add+'tree_method': self.dictionary_params[existing_prefix+'tree_method'],
prefix_to_add+'n_jobs': self.dictionary_params[existing_prefix+'n_jobs'],
prefix_to_add+'gamma': self.dictionary_params[existing_prefix+'gamma'],
prefix_to_add+'min_child_weight': self.dictionary_params[existing_prefix+'min_child_weight'],
prefix_to_add+'max_delta_step': self.dictionary_params[existing_prefix+'max_delta_step'],
prefix_to_add+'subsample': self.dictionary_params[existing_prefix+'subsample'],
prefix_to_add+'colsample_bytree': self.dictionary_params[existing_prefix+'colsample_bytree'],
prefix_to_add+'colsample_bylevel': self.dictionary_params[existing_prefix+'colsample_bylevel'],
prefix_to_add+'colsample_bynode': self.dictionary_params[existing_prefix+'colsample_bynode'],
prefix_to_add+'reg_alpha': self.dictionary_params[existing_prefix+'reg_alpha'],
prefix_to_add+'reg_lambda': self.dictionary_params[existing_prefix+'reg_lambda'],
prefix_to_add+'scale_pos_weight': self.dictionary_params[existing_prefix+'scale_pos_weight'],
prefix_to_add+'base_score': self.dictionary_params[existing_prefix+'base_score'],
prefix_to_add+'random_state': self.dictionary_params[existing_prefix+'random_state'],
prefix_to_add+'missing': self.dictionary_params[existing_prefix+'missing'],
prefix_to_add+'num_parallel_tree': self.dictionary_params[existing_prefix+'num_parallel_tree'],
prefix_to_add+'monotone_constraints': self.dictionary_params[existing_prefix+'monotone_constraints'],
prefix_to_add+'interaction_constraints': self.dictionary_params[existing_prefix+'interaction_constraints'],
prefix_to_add+'importance_type': self.dictionary_params[existing_prefix+'importance_type'],
}
return params_dict
# Greedy search?
def create_spaces(self, prefix_pipeline, estimator_name):
df = pd.DataFrame(data=[self.dictionary_params])
params_range = {}
for col in df.columns:
number = 0
string = 0 # not needed so far
nones = 0
trees = 0
string_key = str(col)
for i in df[col][0]:
type_i = type(i)
if (type_i == int) | (type_i == float):
number += 1
elif type_i == str: # not needed
string += 1
elif i == None: # not needed?
nones += 1
elif (type_i == DecisionTreeRegressor):
trees += 1
# Ranges for simple numeric values - FIX check upon them
if (number == len(df)) & (col != prefix_pipeline+'verbose') & \
(col != (prefix_pipeline+'random_state')) & (col != (prefix_pipeline+'verbosity')) \
& (col != (prefix_pipeline+'n_jobs')) & (trees == 0) & (col != (prefix_pipeline+'n_iter_no_change')) & \
(col != (prefix_pipeline+'missing')) & (col != (prefix_pipeline+'validation_fraction')):
output = df[col][0][0]
if estimator_name == 'RandomForest':
range_output, lower_output, upper_output = ModelsParameters.rf_ranges(self, col, prefix_pipeline, output)
elif estimator_name == 'AdaBoost':
range_output, lower_output, upper_output = ModelsParameters.ab_ranges(self, col, prefix_pipeline, output, trees)
elif estimator_name == 'GradientBoosting':
range_output, lower_output, upper_output = ModelsParameters.gb_ranges(self, col, prefix_pipeline, output)
elif estimator_name == 'XGBoost':
range_output, lower_output, upper_output = ModelsParameters.xgb_ranges(self, col, prefix_pipeline, output)
# Further Conditions on the allowed output range and append
data_to_append = ModelsParameters.create_outputs(self, output, range_output, string_key, lower_output, upper_output)
params_range.update(data_to_append)
# Special Range for AdaBoost trees' max_depth
elif (trees > 0):
data_to_append = ModelsParameters.range_ab_decision_tree(df, self.dictionary_params, col, prefix_pipeline)
params_range.update(data_to_append)
# Else cases - just repeat the same value
else:
data_to_append = {string_key: [i]}
params_range.update(data_to_append)
return params_range
def rf_ranges(self, col, prefix_pipeline, output):
if col == prefix_pipeline+'n_estimators':
range_output = 5
elif col == prefix_pipeline+'max_depth':
range_output = 3
elif col == prefix_pipeline+'min_samples_split':
range_output = 2
elif col == prefix_pipeline+'min_samples_leaf':
range_output = 1
elif col == prefix_pipeline+'min_weight_fraction_leaf':
range_output = 0.05
elif col == prefix_pipeline+'max_features':
range_output = 0
elif col == prefix_pipeline+'max_leaf_nodes':
range_output = 0
elif col == prefix_pipeline+'min_impurity_decrease':
range_output = 0.2
elif col == prefix_pipeline+'ccp_alpha':
range_output = 0.2
elif col == prefix_pipeline+'max_samples':
range_output = 0
lower_output = output - range_output
upper_output = output + range_output
return range_output, lower_output, upper_output
def ab_ranges(self, col, prefix_pipeline, output, trees):
# FIX later: for not needed, thinking of merging with the estimator for tree
if trees == 0:
if col == prefix_pipeline+'n_estimators':
range_output = 5
elif col == prefix_pipeline+'learning_rate':
range_output = 0.01 # FIX: is learning rate max == 1?
else:
pass
lower_output = output - range_output
upper_output = output + range_output
return range_output, lower_output, upper_output
def range_ab_decision_tree(df, start_params, col, prefix_pipeline): # # For AdaBoost range of base_estimator max_depth
tree = df[col][0][0] # not needed
for i in start_params[col]:
x = re.split("\=", str(i))
y = re.split("\)", str(x[1]))[0]
max_depth = int(str(y))
output = sklearn.tree.DecisionTreeRegressor(max_depth=max_depth)
if col == prefix_pipeline+'base_estimator':
range_output = 3
lower_output = max_depth - range_output
upper_output = max_depth + range_output
if (range_output != 0) & (lower_output > 0):
data_to_append = {str(col): [
sklearn.tree.DecisionTreeRegressor(max_depth=lower_output),
output,
sklearn.tree.DecisionTreeRegressor(max_depth=upper_output)
]}
elif (range_output != 0) & (lower_output <= 0):
data_to_append = {str(col): [
output,
sklearn.tree.DecisionTreeRegressor(max_depth=upper_output)
]}
elif (range_output == 0):
data_to_append = {str(col): [
output
]}
return data_to_append
def gb_ranges(self, col, prefix_pipeline, output):
if col == prefix_pipeline+'learning_rate':
range_output = 0 # FIX: is learning rate max == 1?
elif col == prefix_pipeline+'n_estimators':
range_output = 5
elif col == prefix_pipeline+'subsample':
range_output = 0
elif col == prefix_pipeline+'min_samples_split':
range_output = 2
elif col == prefix_pipeline+'min_samples_leaf':
range_output = 1
elif col == prefix_pipeline+'min_weight_fraction_leaf':
range_output = 0.05
elif col == prefix_pipeline+'max_depth':
range_output = 3
elif col == prefix_pipeline+'min_impurity_decrease':
range_output = 0.2
elif col == prefix_pipeline+'max_features':
range_output = 0
elif col == prefix_pipeline+'alpha':
range_output = 0
elif col == prefix_pipeline+'max_leaf_nodes':
range_output = 0
elif col == prefix_pipeline+'tol':
range_output = 0
elif col == prefix_pipeline+'ccp_alpha':
range_output = 0
lower_output = output - range_output
upper_output = output + range_output
return range_output, lower_output, upper_output
def xgb_ranges(self, col, prefix_pipeline, output):
if col == prefix_pipeline+'n_estimators':
range_output = 5
elif col == prefix_pipeline+'max_depth':
range_output = 3
elif col == prefix_pipeline+'learning_rate':
range_output = 0 # FIX: is learning rate max == 1?
elif col == prefix_pipeline+'gamma':
range_output = 0
elif col == prefix_pipeline+'min_child_weight':
range_output = 0
elif col == prefix_pipeline+'max_delta_stop':
range_output = 0
elif col == prefix_pipeline+'subsample':
range_output = 0
elif col == prefix_pipeline+'colsample_bytree':
range_output = 0
elif col == prefix_pipeline+'colsample_bylevel':
range_output = 0
elif col == prefix_pipeline+'colsample_bynode':
range_output = 0
elif col == prefix_pipeline+'reg_alpha':
range_output = 0
elif col == prefix_pipeline+'reg_lambda':
range_output = 0
elif col == prefix_pipeline+'scale_pos_weight':
range_output = 0
elif col == prefix_pipeline+'base_score':
range_output = 0
elif col == prefix_pipeline+'monotone_constraints':
range_output = 0
elif col == prefix_pipeline+'interaction_constraints':
range_output = 0
lower_output = output - range_output
upper_output = output + range_output
return range_output, lower_output, upper_output
##
def create_outputs(self, output, range_output, string_key, lower_output, upper_output):
if range_output == 0:
data_to_append = {string_key: [
output
]}
elif (range_output != 0) & (lower_output > 0):
data_to_append = {string_key: [
lower_output,
output,
upper_output
]}
# FIX could be controversial in certain instances in case you want lower bound to be 0
elif (range_output != 0) & (lower_output == 0):
data_to_append = {string_key: [
output,
upper_output
]}
elif (lower_output < 0) & (output != 0):
data_to_append = {string_key: [
0,
output,
upper_output]}
elif (lower_output < 0) & (output == 0):
data_to_append = {string_key: [
output,
upper_output]}
return data_to_append
def best_model_pipeline(X, Y, pipeline, params_range, cv, scoring='neg_mean_squared_error'):
optimal_model = GridSearchCV(pipeline,
params_range,
scoring=scoring,
cv=cv,
refit=True) # when there is a list in scoring, it needs an explicit one. NP because here comes "s", not "scoring"
print('Below are the params_range')
print(params_range)
result = optimal_model.fit(X, Y)
best_params = result.best_estimator_ # result.best_params_ needed when refit=False
dict_parameters_pipeline = {}
for param in params_range: # list of parameters
dict_parameters_pipeline[str(param)] = [best_params.get_params()[str(param)]]
print('Below are the best params')
print(dict_parameters_pipeline)
return result, dict_parameters_pipeline
##
def NestedCV(X, Y, params, pipeline, prefix_pipeline=None, estimator=None, estimator_name=None,
NUM_TRIALS=1, # for repeated. Note that the sample is anew every time CHECK
inner_n_splits=5,
outer_n_splits=5,
adaptive_grid='yes',
scoring=['neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_root_mean_squared_error'],
):
best_params = pd.DataFrame()
df_feature_importance = pd.DataFrame()
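# Hedged sketch of how the nested cross-validation could continue from here
# (the original fragment ends at this point); the variable names and the exact
# scoring handling below are assumptions, not the author's implementation.
scores = []
for trial in range(NUM_TRIALS):
    inner_cv = KFold(n_splits=inner_n_splits, shuffle=True, random_state=trial)
    outer_cv = KFold(n_splits=outer_n_splits, shuffle=True, random_state=trial)
    # inner loop: tune the pipeline hyper-parameters with the inner folds
    result, tuned_params = best_model_pipeline(X, Y, pipeline, params, inner_cv, scoring=scoring[0])
    best_params = pd.concat([best_params, pd.DataFrame(tuned_params)], ignore_index=True)
    # outer loop: estimate the generalisation error of the tuned pipeline
    outer_scores = cross_val_score(result.best_estimator_, X, Y, cv=outer_cv, scoring=scoring[0])
    scores.append(outer_scores.mean())
# df_feature_importance is left empty in this sketch
return best_params, df_feature_importance, scores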
import gc
import sys
import logging
import yaml
import numpy as np
import pandas as pd
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar
from utils import timer, load_data, reduce_mem_usage
from encoders import GaussianTargetEncoder
# define groupings and corresponding priors
groups_and_priors = {
# single encodings
("hour",): None,
("weekday",): None,
("month",): None,
("building_id",): None,
("primary_use",): None,
("site_id",): None,
# ("meter",): None,
# # second-order interactions
# ("meter", "hour"): ["gte_meter", "gte_hour"],
# ("meter", "weekday"): ["gte_meter", "gte_weekday"],
# ("meter", "month"): ["gte_meter", "gte_month"],
# ("meter", "building_id"): ["gte_meter", "gte_building_id"],
# ("meter", "primary_use"): ["gte_meter", "gte_primary_use"],
# ("meter", "site_id"): ["gte_meter", "gte_site_id"],
# # higher-order interactions with building_id
# ("meter", "building_id", "hour"): ["gte_meter_building_id", "gte_meter_hour"],
# ("meter", "building_id", "weekday"): ["gte_meter_building_id", "gte_meter_weekday"],
# ("meter", "building_id", "month"): ["gte_meter_building_id", "gte_meter_month"],
}
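# For reference: the Gaussian/smoothed target encoding applied to each grouping
# above blends the per-group target mean with a prior mean, weighted by the
# group size. A minimal stand-alone illustration (this is NOT the project's
# GaussianTargetEncoder; the smoothing strength `k` is an assumed parameter):
def smoothed_target_mean(df, group_cols, target, k=5.0):
    prior = df[target].mean()
    agg = df.groupby(list(group_cols))[target].agg(["mean", "count"])
    return (agg["count"] * agg["mean"] + k * prior) / (agg["count"] + k)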
def process_timestamp(df):
df.timestamp = pd.to_datetime(df.timestamp)
df.timestamp = (
df.timestamp - pd.to_datetime("2016-01-01")
).dt.total_seconds() // 3600
def process_weather(
df, dataset, fix_timestamps=True, interpolate_na=True, add_na_indicators=True
):
if fix_timestamps:
site_GMT_offsets = [-5, 0, -7, -5, -8, 0, -5, -5, -5, -6, -7, -5, 0, -6, -5, -5]
GMT_offset_map = {site: offset for site, offset in enumerate(site_GMT_offsets)}
df.timestamp = df.timestamp + df.site_id.map(GMT_offset_map)
if interpolate_na:
site_dfs = []
for site_id in df.site_id.unique():
# Make sure that we include all possible hours so that we can interpolate evenly
if dataset == "train":
site_df = (
df[df.site_id == site_id]
.set_index("timestamp")
.reindex(range(8784))
)
elif dataset == "test":
site_df = (
df[df.site_id == site_id]
.set_index("timestamp")
.reindex(range(8784, 26304))
)
else:
raise ValueError(f"dataset={dataset} not recognized")
site_df.site_id = site_id
for col in [c for c in site_df.columns if c != "site_id"]:
if add_na_indicators:
site_df[f"had_{col}"] = ~site_df[col].isna()
site_df[col] = site_df[col].interpolate(
limit_direction="both",
method="spline",
order=3,
)
# Some sites are completely missing some columns, so use this fallback
site_df[col] = site_df[col].fillna(df[col].median())
site_dfs.append(site_df)
df = pd.concat(
site_dfs
).reset_index() # make timestamp back into a regular column
if add_na_indicators:
for col in df.columns:
if df[col].isna().any():
df[f"had_{col}"] = ~df[col].isna()
return df.fillna(-1) # .set_index(["site_id", "timestamp"])
def add_lag_feature(df, window=3, group_cols="site_id", lag_cols=["air_temperature"]):
rolled = df.groupby(group_cols)[lag_cols].rolling(
window=window, min_periods=0, center=True
)
lag_mean = rolled.mean().reset_index().astype(np.float16)
lag_max = rolled.quantile(0.95).reset_index().astype(np.float16)
lag_min = rolled.quantile(0.05).reset_index().astype(np.float16)
lag_std = rolled.std().reset_index().astype(np.float16)
for col in lag_cols:
df[f"{col}_mean_lag{window}"] = lag_mean[col]
df[f"{col}_max_lag{window}"] = lag_max[col]
df[f"{col}_min_lag{window}"] = lag_min[col]
df[f"{col}_std_lag{window}"] = lag_std[col]
def add_features(df):
# time features
df["hour"] = df.ts.dt.hour
df["weekday"] = df.ts.dt.weekday
df["month"] = df.ts.dt.month
df["year"] = df.ts.dt.year
# time interactions
df["weekday_hour"] = df.weekday.astype(str) + "-" + df.hour.astype(str)
# apply cyclic encoding of periodic features
df["hour_x"] = np.cos(2 * np.pi * df.timestamp / 24)
df["hour_y"] = np.sin(2 * np.pi * df.timestamp / 24)
df["month_x"] = np.cos(2 * np.pi * df.timestamp / (30.4 * 24))
df["month_y"] = np.sin(2 * np.pi * df.timestamp / (30.4 * 24))
df["weekday_x"] = np.cos(2 * np.pi * df.timestamp / (7 * 24))
df["weekday_y"] = np.sin(2 * np.pi * df.timestamp / (7 * 24))
# meta data features
df["year_built"] = df["year_built"] - 1900
# bulding_id interactions
# bm_ = df.building_id.astype(str) + "-" + df.meter.astype(str) + "-"
bm_ = df.building_id.astype(str) + "-"
df["building_weekday_hour"] = bm_ + df.weekday_hour
df["building_weekday"] = bm_ + df.weekday.astype(str)
df["building_month"] = bm_ + df.month.astype(str)
df["building_hour"] = bm_ + df.hour.astype(str)
# df["building_meter"] = bm_
# get holidays
dates_range = pd.date_range(start="2015-12-31", end="2019-01-01")
us_holidays = calendar().holidays(start=dates_range.min(), end=dates_range.max())
df["is_holiday"] = (df.ts.dt.date.astype("datetime64").isin(us_holidays)).astype(
np.int8
)
if __name__ == "__main__":
# load config file from CLI
with open(str(sys.argv[1]), "r") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
algorithm = config["algorithm"]
discord_file = config["discord_file"]
data_location = config["data_location"]
discord_location = config["discord_location"]
output_location = config["output_location"]
# logging file
logging.basicConfig(
filename=algorithm + ".log",
level=logging.INFO,
format="%(asctime)s:%(levelname)s:%(message)s",
)
logging.info(f"Experiment: {algorithm}")
with timer("Loading data"):
logging.info("Loading data")
train, test = load_data("input", data_location=data_location)
building_meta = load_data("meta", data_location=data_location)
train_weather, test_weather = load_data("weather", data_location=data_location)
with timer("Process timestamp"):
logging.info("Process timestamp")
train["ts"] = pd.to_datetime(train.timestamp)
test["ts"] = pd.to_datetime(test.timestamp)
process_timestamp(train)
process_timestamp(test)
process_timestamp(train_weather)
process_timestamp(test_weather)
with timer("Process weather"):
logging.info("Process weather")
process_weather(train_weather, "train")
process_weather(test_weather, "test")
for window_size in [7, 73]:
add_lag_feature(train_weather, window=window_size)
add_lag_feature(test_weather, window=window_size)
with timer("Combine data"):
logging.info("Combine data")
train = pd.merge(train, building_meta, "left", "building_id")
train = pd.merge(train, train_weather, "left", ["site_id", "timestamp"])
# package(s) for data handling
import pandas as pd
import numpy as np
# *** General functions
def report_element(Terminal, Element, year):
elements = 0
elements_online = 0
element_name = []
list_of_elements = find_elements(Terminal, Element)
if list_of_elements != []:
for element in list_of_elements:
element_name = element.name
elements += 1
if year >= element.year_online:
elements_online += 1
if Terminal.debug:
if elements_online or elements:
print(' a total of {} {} is online; a total of {} is still pending'.format(elements_online, element_name, elements - elements_online))
return elements_online, elements
def find_elements(Terminal, obj):
"""return elements of type obj part of Terminal.elements"""
list_of_elements = []
if Terminal.elements != []:
for element in Terminal.elements:
if isinstance(element, obj):
list_of_elements.append(element)
return list_of_elements
def add_cashflow_data_to_element(Terminal, element):
"""Place cashflow data in element dataframe
Elements that take two years to build are assigned 60% of the capex to year one and 40% to year two."""
# years
years = Terminal.modelframe
#years = list(range(Terminal.startyear, Terminal.startyear + Terminal.lifecycle))
# capex
capex = element.capex
if hasattr(element, 'capex_material'):
capex_material = element.capex_material
else:
capex_material = 0
#capex_material = element.capex_material
# opex
maintenance = element.maintenance
insurance = element.insurance
labour = element.labour
if hasattr(element, 'purchaseH2'):
purchaseH2 = element.purchaseH2
else:
purchaseH2 = 0
if hasattr(element, 'purchase_material'):
purchase_material = element.purchase_material
else:
purchase_material = 0
# purchaseH2 = element.purchaseH2
# purchase_material = element.purchase_material
# year online
year_online = element.year_online
year_delivery = element.delivery_time
df = pd.DataFrame()
# years
df["year"] = years
# capex
if year_delivery > 1:
df.loc[df["year"] == year_online - 2, "capex"] = 0.6 * capex
df.loc[df["year"] == year_online - 1, "capex"] = 0.4 * capex
else:
df.loc[df["year"] == year_online - 1, "capex"] = capex
if capex_material:
df.loc[df["year"] == year_online, "capex_material"] = capex_material
# opex
if maintenance:
df.loc[df["year"] >= year_online, "maintenance"] = maintenance
if insurance:
df.loc[df["year"] >= year_online, "insurance"] = insurance
if labour:
df.loc[df["year"] >= year_online, "labour"] = labour
if purchaseH2:
df.loc[df["year"] >= year_online, "purchaseH2"] = purchaseH2
if purchase_material:
df.loc[df["year"] >= year_online, "purchase_material"] = purchase_material
df.fillna(0, inplace=True)
element.df = df
return element
def add_cashflow_elements(Terminal, labour):
"""Cycle through each element and collect all cash flows into a pandas dataframe."""
cash_flows = pd.DataFrame()
# initialise cash_flows
#cash_flows['year'] = list(range(Terminal.startyear, Terminal.startyear + Terminal.lifecycle))
cash_flows['year'] = Terminal.modelframe
cash_flows['capex'] = 0
cash_flows['capex_material'] = 0
cash_flows['maintenance'] = 0
cash_flows['insurance'] = 0
cash_flows['energy'] = 0
cash_flows['labour'] = 0
cash_flows['fuel'] = 0
cash_flows['purchaseH2'] = 0
cash_flows['purchase_material'] = 0
cash_flows['demurrage'] = 0 #Terminal.demurrage
try:
cash_flows['revenues'] = Terminal.revenues
except:
cash_flows['revenues'] = 0
# add labour component for years were revenues are not zero
# cash_flows.loc[cash_flows['revenues'] != 0, 'labour'] = \
# labour.international_staff * labour.international_salary + labour.local_staff * labour.local_salary
# todo: check the labour costs of the container terminals (they are not included now)
for element in Terminal.elements:
if hasattr(element, 'df'):
element.df = element.df.fillna(0)
for column in cash_flows.columns:
if column in element.df.columns and column != "year":
cash_flows[column] += element.df[column]
# calculate WACC real cashflows
cash_flows_WACC_real = pd.DataFrame()
cash_flows_WACC_real['year'] = cash_flows['year']
for year in Terminal.years:
for column in cash_flows.columns:
if column != "year":
cash_flows_WACC_real.loc[cash_flows_WACC_real['year'] == year, column] = \
cash_flows.loc[cash_flows['year'] == year, column] /\
((1 + WACC_real()) ** (year - Terminal.modelframe[0]+1))
cash_flows = cash_flows.fillna(0)
cash_flows_WACC_real = cash_flows_WACC_real.fillna(0)
return cash_flows, cash_flows_WACC_real
def NPV(Terminal, labour):
"""Gather data from Terminal elements and combine into a cash flow overview"""
# add cash flow information for each of the Terminal elements
cash_flows, cash_flows_WACC_real = add_cashflow_elements(Terminal, labour)
# prepare years, revenue, capex and opex for plotting
years = cash_flows_WACC_real['year'].values
revenues = cash_flows_WACC_real['revenues'].values
capex = cash_flows_WACC_real['capex'].values
opex = cash_flows_WACC_real['insurance'].values + \
cash_flows_WACC_real['maintenance'].values + \
cash_flows_WACC_real['energy'].values + \
cash_flows_WACC_real['demurrage'].values + \
cash_flows_WACC_real['fuel'].values + \
cash_flows_WACC_real['labour'].values
# collect all results in a pandas dataframe
df = pd.DataFrame(index=years, data=-capex, columns=['CAPEX'])
df['OPEX'] = -opex
df['REVENUES'] = revenues
df['PV'] = - capex - opex + revenues
df['cum-PV'] = np.cumsum(- capex - opex + revenues)
return df
def WACC_nominal(Gearing=60, Re=.10, Rd=.30, Tc=.28):
"""Nominal cash flow is the true dollar amount of future revenues the company expects
to receive and expenses it expects to pay out, including inflation.
Use this rate when all cash flows within the model are denoted in nominal terms, i.e. including inflation."""
# Gearing = Gearing
# Re = Re # return on equity
# Rd = Rd # return on debt
# Tc = Tc # income tax
# E = 100 - Gearing
# D = Gearing
# WACC_nominal = ((E / (E + D)) * Re + (D / (E + D)) * Rd) * (1 - Tc)
WACC_nominal = 8/100
return WACC_nominal
def WACC_real(inflation=0.02): # old: interest=0.0604
"""Real cash flow expresses a company's cash flow with adjustments for inflation.
When all cashflows within the model are denoted in real terms and have been
adjusted for inflation (i.e. inflation is not included in the cash flows),
WACC_real should be used. WACC_real is computed as follows:"""
WACC_real = WACC_nominal()
#WACC_real = (WACC_nominal() + 1) / (inflation + 1) - 1
return WACC_real
def occupancy_to_waitingfactor(utilisation=.3, nr_of_servers_to_chk=4, kendall='E2/E2/n'):
"""Waiting time factor (E2/E2/n or M/E2/n) queueing theory using linear interpolation)"""
if kendall == 'E2/E2/n':
# Create dataframe with data from Groenveld (2007) - Table V
# See also PIANC 2014 Table 6.2
utilisations = np.array([.1, .2, .3, .4, .5, .6, .7, .8, .9])
nr_of_servers = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
data = np.array([
[0.0166, 0.0006, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.0604, 0.0065, 0.0011, 0.0002, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.1310, 0.0235, 0.0062, 0.0019, 0.0007, 0.0002, 0.0001, 0.0000, 0.0000, 0.0000],
[0.2355, 0.0576, 0.0205, 0.0085, 0.0039, 0.0019, 0.0009, 0.0005, 0.0003, 0.0001],
[0.3904, 0.1181, 0.0512, 0.0532, 0.0142, 0.0082, 0.0050, 0.0031, 0.0020, 0.0013],
[0.6306, 0.2222, 0.1103, 0.0639, 0.0400, 0.0265, 0.0182, 0.0128, 0.0093, 0.0069],
[1.0391, 0.4125, 0.2275, 0.1441, 0.0988, 0.0712, 0.0532, 0.0407, 0.0319, 0.0258],
[1.8653, 0.8300, 0.4600, 0.3300, 0.2300, 0.1900, 0.1400, 0.1200, 0.0900, 0.0900],
[4.3590, 2.0000, 1.2000, 0.9200, 0.6500, 0.5700, 0.4400, 0.4000, 0.3200, 0.3000]
])
elif kendall == 'M/E2/n':
# Create dataframe with data from Groenveld (2007) - Table IV
# See also PIANC 2014 Table 6.1
utilisations = np.array([.1, .15, .2, .25, .3, .35, .4, .45, .5, .55, .6, .65, .7, .75, .8, .85, .9])
nr_of_servers = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])
data = np.array([
[0.08, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.13, 0.02, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.19, 0.03, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.25, 0.05, 0.02, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.32, 0.08, 0.03, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.40, 0.11, 0.04, 0.02, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.50, 0.15, 0.06, 0.03, 0.02, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.60, 0.20, 0.08, 0.05, 0.03, 0.02, 0.01, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.75, 0.26, 0.12, 0.07, 0.04, 0.03, 0.02, 0.01, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00],
[0.91, 0.33, 0.16, 0.10, 0.06, 0.04, 0.03, 0.02, 0.02, 0.01, 0.01, 0.01, 0.00, 0.00],
[1.13, 0.43, 0.23, 0.14, 0.09, 0.06, 0.05, 0.03, 0.03, 0.02, 0.02, 0.01, 0.01, 0.01],
[1.38, 0.55, 0.30, 0.19, 0.12, 0.09, 0.07, 0.05, 0.04, 0.03, 0.03, 0.02, 0.02, 0.02],
[1.75, 0.73, 0.42, 0.27, 0.19, 0.14, 0.11, 0.09, 0.07, 0.06, 0.05, 0.04, 0.03, 0.03],
[2.22, 0.96, 0.59, 0.39, 0.28, 0.21, 0.17, 0.14, 0.12, 0.10, 0.08, 0.07, 0.06, 0.05],
[3.00, 1.34, 0.82, 0.57, 0.42, 0.33, 0.27, 0.22, 0.18, 0.16, 0.13, 0.11, 0.10, 0.09],
[4.50, 2.00, 1.34, 0.90, 0.70, 0.54, 0.46, 0.39, 0.34, 0.30, 0.26, 0.23, 0.20, 0.18],
[6.75, 3.14, 2.01, 1.45, 1.12, 0.91, 0.76, 0.65, 0.56, 0.50, 0.45, 0.40, 0.36, 0.33]
])
df = pd.DataFrame(data, index=utilisations, columns=nr_of_servers)
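# Hedged completion (the original fragment stops after building the table):
# linearly interpolate the waiting-time factor for the requested utilisation
# at the requested number of servers, as the docstring describes.
waiting_factor = np.interp(utilisation, df.index.values, df[nr_of_servers_to_chk].values)
return waiting_factor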
import warnings
from collections import namedtuple
from functools import partial
import numpy
from scipy import stats
import pandas
import statsmodels.api as sm
from statsmodels.tools.decorators import cache_readonly
try:
from tqdm import tqdm
except ImportError: # pragma: no cover
tqdm = None
from wqio import utils
from wqio import bootstrap
from wqio.ros import ROS
from wqio import validate
from wqio.features import Location, Dataset
_Stat = namedtuple("_stat", ["stat", "pvalue"])
def _dist_compare(x, y, stat_comp_func):
if (len(x) == len(y)) and numpy.equal(x, y).all():
return _Stat(numpy.nan, numpy.nan)
return stat_comp_func(x, y, alternative="two-sided")
class DataCollection(object):
"""Generalized water quality comparison object.
Parameters
----------
dataframe : pandas.DataFrame
Dataframe all of the data to analyze.
rescol, qualcol, stationcol, paramcol : string
Column labels for the results, qualifiers, stations (monitoring
locations), and parameters (pollutants), respectively.
.. note::
Non-detect results should be reported as the detection
limit of that observation.
ndval : string or list of strings, options
The values found in ``qualcol`` that indicates that a
result is a non-detect.
othergroups : list of strings, optional
The columns (besides ``stationcol`` and ``paramcol``) that
should be considered when grouping into subsets of data.
pairgroups : list of strings, optional
Other columns besides ``stationcol`` and ``paramcol`` that
can be used define a unique index on ``dataframe`` such that it
can be "unstack" (i.e., pivoted, cross-tabbed) to place the
``stationcol`` values into columns. Values of ``pairgroups``
may overlap with ``othergroups``.
useros : bool (default = True)
Toggles the use of regression-on-order statistics to estimate
non-detect values when computing statistics.
filterfxn : callable, optional
Function that will be passed to the ``filter`` method of a
``pandas.Groupby`` object to remove groups that should not be
analyzed (for whatever reason). If not provided, all groups
returned by ``dataframe.groupby(by=groupcols)`` will be used.
bsiter : int
Number of iterations the bootstrapper should use when estimating
confidence intervals around a statistic.
showpbar : bool (True)
When True and the `tqdm` module is available, this will toggle the
appears of progress bars in long-running group by-apply operations.
"""
# column that stores the censorship status of an observation
cencol = "__censorship"
def __init__(
self,
dataframe,
rescol="res",
qualcol="qual",
stationcol="station",
paramcol="parameter",
ndval="ND",
othergroups=None,
pairgroups=None,
useros=True,
filterfxn=None,
bsiter=10000,
showpbar=True,
):
# cache for all of the properties
self._cache = {}
# basic input
self.raw_data = dataframe
self._raw_rescol = rescol
self.qualcol = qualcol
self.stationcol = stationcol
self.paramcol = paramcol
self.ndval = validate.at_least_empty_list(ndval)
self.othergroups = validate.at_least_empty_list(othergroups)
self.pairgroups = validate.at_least_empty_list(pairgroups)
self.useros = useros
self.filterfxn = filterfxn or utils.non_filter
self.bsiter = bsiter
self.showpbar = showpbar
# column that stores ROS'd values
self.roscol = "ros_" + rescol
# column that stores the "final" values
if self.useros:
self.rescol = self.roscol
else:
self.rescol = rescol
# columns to group by when ROS'd, doing general stats
self.groupcols = [self.stationcol, self.paramcol] + self.othergroups
self.groupcols_comparison = [self.paramcol] + self.othergroups
# columns to group and pivot by when doing paired stats
self.pairgroups = self.pairgroups + [self.stationcol, self.paramcol]
# final column list of the tidy dataframe
self.tidy_columns = self.groupcols + [self._raw_rescol, self.cencol]
# the "raw" data with the censorship column added
self.data = dataframe.assign(
**{self.cencol: dataframe[self.qualcol].isin(self.ndval)}
).reset_index()
@cache_readonly
def tidy(self):
if self.useros:
def fxn(g):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
rosdf = (
ROS(
df=g,
result=self._raw_rescol,
censorship=self.cencol,
as_array=False,
)
.rename(columns={"final": self.roscol})
.loc[:, [self._raw_rescol, self.roscol, self.cencol]]
)
return rosdf
else:
def fxn(g):
g[self.roscol] = numpy.nan
return g
if tqdm and self.showpbar:
def make_tidy(df):
tqdm.pandas(desc="Tidying the DataCollection")
return df.groupby(self.groupcols).progress_apply(fxn)
else:
def make_tidy(df):
return df.groupby(self.groupcols).apply(fxn)
keep_cols = self.tidy_columns + [self.roscol]
with warnings.catch_warnings():
warnings.simplefilter("once")
_tidy = (
self.data.reset_index()[self.tidy_columns]
.groupby(by=self.groupcols)
.filter(self.filterfxn)
.pipe(make_tidy)
.reset_index()
.sort_values(by=self.groupcols)
)
return _tidy[keep_cols]
@cache_readonly
def paired(self):
_pairs = (
self.data.reset_index()
.groupby(by=self.groupcols)
.filter(self.filterfxn)
.set_index(self.pairgroups)
.unstack(level=self.stationcol)
.rename_axis(["value", self.stationcol], axis="columns")
)[[self._raw_rescol, self.cencol]]
return _pairs
def generic_stat(
self,
statfxn,
use_bootstrap=True,
statname=None,
has_pvalue=False,
filterfxn=None,
**statopts
):
"""Generic function to estimate a statistic and its CIs.
Parameters
----------
statfxn : callable
A function that takes a 1-D sequnce and returns a scalar
results. Its call signature should be in the form:
``statfxn(seq, **kwargs)``.
use_bootstrap : bool, optional
Toggles using a BCA bootstrapping method to estimate the
95% confidence interval around the statistic.
statname : string, optional
Name of the statistic. Included as a column name in the
final dataframe.
has_pvalue : bool, optional
Set to ``True`` if ``statfxn`` returns a tuple of the
statistic and it's p-value.
**statopts : optional kwargs
Additional keyword arguments that will be passed to
``statfxn``.
Returns
-------
stat_df : pandas.DataFrame
A dataframe of all the results of the ``statfxn`` when applied
to ``self.tidy.groupby(self.groupcols)``.
Examples
--------
This actually demonstrates how ``DataCollection.mean`` is
implemented.
>>> import numpy
>>> import wqio
>>> from wqio.tests import helpers
>>> df = helpers.make_dc_data_complex()
>>> dc = DataCollection(df, rescol='res', qualcol='qual',
... stationcol='loc', paramcol='param',
... ndval='<')
>>> means = dc.generic_stat(numpy.mean, statname='Arith. Mean')
You can also use ``lambda`` objects
>>> pctl35 = dc.generic_stat(lambda x: numpy.percentile(x, 35),
... statname='pctl35', use_bootstrap=False)
"""
if statname is None:
statname = "stat"
if filterfxn is None:
filterfxn = utils.non_filter
def fxn(x):
data = x[self.rescol].values
if use_bootstrap:
stat = statfxn(data)
lci, uci = bootstrap.BCA(data, statfxn=statfxn)
values = [lci, stat, uci]
statnames = ["lower", statname, "upper"]
else:
values = validate.at_least_empty_list(statfxn(data, **statopts))
if hasattr(values, "_fields"):  # namedtuple
statnames = values._fields
else: # tuple
statnames = [statname]
if has_pvalue:
statnames.append("pvalue")
return pandas.Series(values, index=statnames)
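# Hedged sketch of the remainder of generic_stat (the original fragment ends
# inside the helper): group the tidy data, apply `fxn`, and pivot the stations
# into columns as the docstring describes. The real post-processing may differ.
stat = (
    self.tidy.groupby(by=self.groupcols)
    .filter(filterfxn)
    .groupby(by=self.groupcols)
    .apply(fxn)
    .unstack(level=self.stationcol)
)
return stat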
# -*- coding: utf-8 -*-
# import pytest
import pandas as pd
import pandas.testing as tm
import xnd
from pandas.core.internals import ExtensionBlock
import numpy as np
import xndframes as xf
TEST_ARRAY = ["Test", "string", None]
def test_constructors():
v1 = xf.XndframesArray(TEST_ARRAY)
assert isinstance(v1.dtype, xf.XndframesDtype)
v2 = xf.XndframesArray(np.array(TEST_ARRAY))
assert isinstance(v2.dtype, xf.XndframesDtype)
v3 = xf.XndframesArray(xnd.xnd(TEST_ARRAY))
assert isinstance(v3.dtype, xf.XndframesDtype)
def test_concatenate_blocks():
v1 = xf.XndframesArray(TEST_ARRAY)
sa = pd.Series(v1)
result = pd.concat([sa, sa], ignore_index=True)
EXPECTED_ARRAY = xnd.xnd(["Test", "string", None, "Test", "string", None])
expected = pd.Series(xf.XndframesArray(EXPECTED_ARRAY))
tm.assert_series_equal(result, expected)
def test_series_constructor():
v = xf.XndframesArray(TEST_ARRAY)
result = pd.Series(v)
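# Plausible completion (the original fragment stops here): the series should
# keep the extension dtype, mirroring test_constructors above.
assert isinstance(result.dtype, xf.XndframesDtype)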
#--------------------------------------------------
import pandas as pd
import numpy as np
import datetime
from datetime import date
import Auxiliary.auxiliary_functions as aux_fun
#--------------------------------------------------
def read_and_delete():
'''
Function that reads the processed dataframes and deltes the
columns that don't provide any info.
'''
visita_basal = pd.read_csv("./Temp/OR_vb.csv")
visita_seguiment = pd.read_csv("./Temp/OR_vs.csv")
events = pd.read_csv("./Temp/OR_events.csv")
# Deleting the previous temporary files
del_csvs = ["OR_vb","OR_vs","OR_events"]
aux_fun.delete_csvs(del_csvs,"./Temp/")
# Deleting the columns
visita_basal = visita_basal.dropna(how='all', axis=1)
visita_seguiment = visita_seguiment.dropna(how='all', axis=1)
events = events.dropna(how='all', axis=1)
return visita_basal,visita_seguiment,events
#--------------------------------------------------
def extract_season(string):
'''
Auxiliary function that, given a YYYY-MM-DDTHH:mm:ssZ formatted string,
returns the season of the date
'''
# Checking if the provided date contains a value.
if not pd.isnull(string):
val = aux_fun.string_to_date(string)
Y = 2000 # Dummy leap year to allow input X-02-29 (leap day)
seasons = [('hivern', (date(Y, 1, 1), date(Y, 3, 20))),
('primavera', (date(Y, 3, 21), date(Y, 6, 20))),
('estiu', (date(Y, 6, 21), date(Y, 9, 22))),
('tardor', (date(Y, 9, 23), date(Y, 12, 20))),
('hivern', (date(Y, 12, 21), date(Y, 12, 31)))]
val = val.replace(year=Y)
return next(season for season, (start, end) in seasons
if start <= val <= end)
return np.nan
#--------------------------------------------------
def season_visit(df):
'''
Function that given a dataframe, computes the season for each visit contained
on the df.
'''
df["estacio_visita"] = np.nan
df.loc[:,"estacio_visita"] = df.apply(lambda row: extract_season(row.data_visita), axis= 1)
#--------------------------------------------------
def visit_duration(df):
'''
Function that given a dataframe, computes the duration of the visit
for each observation contained on the df. The function removes those observations
with negative duration.
'''
df["estada"] = np.nan
df.loc[:,"estada"] = df.apply(lambda row: aux_fun.difference_in_days(row.data_visita,row.alta_event,1), axis= 1)
df = df[df['estada'] >= 0]
df.reset_index(drop=True, inplace=True)
return df
#--------------------------------------------------
def days_since_first_visit(df,base):
'''
Auxiliary function that given the df interested and a base df for comparison,
computes the number of days that have passed since the first appearence of a
patient in the base df.
'''
df["days_since_start"] = np.nan
for person in df['nhc'].unique():
aux = base.loc[base['nhc'] == person,'data_visita']
if aux.shape[0] > 0:
day_ini = aux.values[0]
df.loc[df["nhc"] == person,"days_since_start"] = df.loc[df["nhc"] == person].apply(lambda row : aux_fun.difference_in_days(day_ini,row.data_visita,1), axis = 1)
else:
df = df.drop(df[df.nhc == person].index)
return df
#--------------------------------------------------
def string_to_year(string):
'''
Auxiliary function that given a YYYY-MM-DDTHH:mm:ssZ formated
string, returns it years.
'''
if not pd.isnull(string):
sep = string.split('T')[0]
da = datetime.datetime.strptime(sep, '%Y-%m-%d').date()
return da.year
return np.nan
#--------------------------------------------------
def old_ecos(df,th):
'''
Function that imputes as NA all the ecos performed before
the given threshold.
'''
old_data_aux = df.copy()
old_data_aux["year"] = np.nan
old_data_aux.loc[:,"year"] = old_data_aux.apply(lambda row: string_to_year(row.eco_basal), axis= 1)
# Obtaining the nhcs from the patients that have an eco before the th.
old_nhc = list(old_data_aux.loc[old_data_aux["year"] <= th, "nhc"])
df.loc[df.nhc.isin(old_nhc),"eco_basal"] = np.nan
#--------------------------------------------------
def old_visits(df_vb,df_vs,th):
'''
Function that deletes the observation which initial visit was before the
given threshold and don't have any "visita_seguiment".
'''
old_data_aux = df_vb.copy()
old_data_aux["year"] = np.nan
old_data_aux.loc[:,"year"] = old_data_aux.apply(lambda row: string_to_year(row.data_visita), axis= 1)
# Obtaining the nhcs from the patients that fulfill the conditions.
old_nhc = list(old_data_aux.loc[old_data_aux["year"] <= th, "nhc"])
old_nhc_def = []
for person in old_nhc:
aux = df_vs.loc[df_vs['nhc'] == person, :]
if aux.shape[0] == 0:
old_nhc_def.append(person)
# Deleting all the observations whose nhc appears in old_nhc_def.
df_vb.drop(df_vb[df_vb.nhc.isin(old_nhc_def)].index, inplace = True)
df_vb.reset_index(drop=True, inplace=True)
return df_vb
#--------------------------------------------------
def visita_basal_outliers(df_vb,df_vs,th_v,th_e):
'''
Function that deletes outliers from the visita_basal df.
'''
df_vb = old_visits(df_vb,df_vs,th_v)
old_ecos(df_vb,th_e)
# Deleting one observation that has 21 UCI visits but no following visit.
df_vb.drop(df_vb[df_vb.ucies_basal == 21].index, inplace=True)
df_vb.reset_index(drop=True, inplace=True)
# Modifying a manuscript error in the height variable.
df_vb.at[df_vb["talla"] == 716,"talla"] = 176
return df_vb
#--------------------------------------------------
def fixing_weight_height(df,type):
'''
Function that modifies wrong manually-introduced values. These errors fall
in the following categories:
· Values introduced in meters instead of centimeters.
· Values that miss the 1 at the beginning.
'''
# VISITA BASAL
if type == 1:
df.loc[df["talla"] < 10,"talla"] = df.loc[df["talla"] < 10].apply(lambda row: row.talla *100,axis =1 )
df.loc[df["talla"] < 100,"talla"] = df.loc[df["talla"] < 100].apply(lambda row: row.talla +100,axis =1 )
# VISITA SEGUIMENT
if type == 2:
df.loc[df["talla_seguim"] < 10,"talla_seguim"] = df.loc[df["talla_seguim"] < 10].apply(lambda row: row.talla_seguim *100,axis =1 )
df.loc[df["talla_seguim"] < 100,"talla_seguim"] = df.loc[df["talla_seguim"] < 100].apply(lambda row: row.talla_seguim +100,axis =1 )
#--------------------------------------------------
def compute_imc(weight,height):
'''
Auxiliary function that given weight (in kg) and height (in cm)
computes the IMC (BMI) using the basic formula.
'''
# Checking if both provided variables contain info.
if not pd.isnull(weight) and not pd.isnull(height):
    # standard BMI formula: weight in kg divided by squared height in metres
    return weight / ((height / 100) ** 2)
return np.nan
#!/usr/bin/python
from configparser import ConfigParser
import linecache
import numpy as np
from osgeo import ogr
import psycopg2
import psycopg2.extensions
import pandas as pd
import pandas.io.sql as pdsql
import re
import sys
def drawPolygon(pointlist):
geomtxt = []
polyList = []
if pointlist[3]>0 and pointlist[2]<0:
polyList.append([pointlist[0],pointlist[1],180.0,pointlist[3]])
polyList.append([pointlist[0],pointlist[1],pointlist[2],-180.0])
else:
polyList.append(pointlist)
for poly in polyList:
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint(poly[3], poly[1])
ring.AddPoint(poly[2], poly[1])
ring.AddPoint(poly[2], poly[0])
ring.AddPoint(poly[3], poly[0])
ring.AddPoint(poly[3], poly[1])
# Create polygon
polygon = ogr.Geometry(ogr.wkbPolygon)
polygon.AddGeometry(ring)
polygon.FlattenTo2D()
txt = polygon.ExportToWkt()
geomtxt.append(txt)
return geomtxt
def level6Divide():
conn = None
try:
# For level 6 aggregate table division
df = pd.read_csv('level6.csv')
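# Hedged completion (the fragment ends inside the try block): a typical
# psycopg2 pattern would close the function roughly like this; what is done
# with `df` and the connection settings are assumptions, not the original code.
except (Exception, psycopg2.DatabaseError) as error:
    print(error)
finally:
    if conn is not None:
        conn.close()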
import re
import tempfile
import pandas
from dnm_cohorts.download_file import download_with_cookies
from dnm_cohorts.person import Person
from dnm_cohorts.convert_pdf_table import extract_pages, convert_page
url = 'https://www.nejm.org/doi/suppl/10.1056/NEJMoa1206524/suppl_file/nejmoa1206524_appendix.pdf'
def extract_table(handle):
records = []
for page in extract_pages(handle, start=4, end=26):
data = convert_page(page)
data = sorted(data, reverse=True, key=lambda x: x.y0)
for line in data:
text = [ x.get_text() for x in sorted(line, key=lambda x: x.x0) ]
text = ''.join(text)
records.append(text)
male = re.compile('[Tt]his (boy|male)+')
female = re.compile('[Tt]his (girl|female)+')
ids, sex = [], []
for line in records:
if line.startswith('Trio'):
ids.append(line.split(' ')[1])
if male.search(line) is not None:
sex.append('male')
elif female.search(line) is not None:
sex.append('female')
else:
# Trio 69 only refers to 'she'.
sex.append('female')
return pandas.DataFrame({'person_id': ids, 'sex': sex})
#!/usr/bin/env python
"""
"""
import click
import zipfile
import os
import json
import csv
import numpy as np
import dateutil.parser
import datetime
import pandas as pd
import plotly
import plotly.graph_objs as go
def parse_summary_json(file):
events = []
cycles = []
print(f"Reading from: {file}")
with open(file) as f:
data = json.load(f)
# Json file contains top level keys for each day
for day in data:
# Save each event
for x in data[day]["events"]:
value = None
# EVENT_TYPE_HEAT has x.setPoint.targets.heatingTarget
if "setPoint" in x:
value = x["setPoint"]["targets"]["heatingTarget"]
# EVENT_TYPE_AUTOAWAY has x.ecoAutoAway.targets.heatingTarget
if "ecoAutoAway" in x:
value = x["ecoAutoAway"]["targets"]["heatingTarget"]
# EVENT_TYPE_AWAY has x.ecoAway.targets.heatingTarget
if "ecoAway" in x:
value = x["ecoAway"]["targets"]["heatingTarget"]
time = pd.to_datetime(x["startTs"])
duration = pd.to_timedelta(x["duration"])
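# Plausible continuation (the original fragment stops here): collect the parsed
# event; the exact record layout is an assumption, not the original code.
events.append({"time": time, "duration": duration, "heating_target": value})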
# Copyright (c) 2020 fortiss GmbH
#
# Authors: <NAME>, <NAME>, <NAME> and
# <NAME>
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
from pathlib import Path
import os
from datetime import datetime
import pandas as pd
import numpy as np
import logging
from shutil import copyfile
import pathlib
import ntpath
from bark.benchmark.benchmark_analyzer import BenchmarkAnalyzer
from bark.runtime.viewer.video_renderer import VideoRenderer
from bark.runtime.viewer import MPViewer
from bark.runtime.commons.parameters import ParameterServer
from lxml import etree
from pathlib import Path
from bark.core.models.dynamic import StateDefinition
class ScenarioDumper(BenchmarkAnalyzer):
def __init__(self, base_result_folder, benchmark_result):
super(ScenarioDumper, self).__init__(benchmark_result)
# Create result dir
if not os.path.isdir(base_result_folder):
Path(base_result_folder).mkdir(parents=True, exist_ok=True)
this_result_folder = datetime.now().strftime("%d%m%Y_%H:%M:%S")
scenario_behavior = self._benchmark_result.get_benchmark_configs()[0].scenario_set_name \
+ "_" + self._benchmark_result.get_benchmark_configs()[0].behavior_config.behavior_name
self._result_folder = os.path.join(base_result_folder, scenario_behavior, this_result_folder)
Path(self._result_folder).mkdir(parents=True, exist_ok=True)
# Based on the given filter dump the matching scenarios
# @note we do not check if the entries in config_idx_list really exist!
def export_scenarios_filter(self, filter = {}, config_idx_list = []):
if filter:
configs_found = super().find_configs(filter)
else:
configs_found = []
if configs_found and config_idx_list:
configs_found = list(set(configs_found) & set(config_idx_list))
elif not configs_found and config_idx_list:
configs_found = config_idx_list
elif configs_found and not config_idx_list:
# noting to do
configs_found = configs_found
else: #both empty -> error
raise ValueError("Either specify a non-empty filter of a valid list of indices!")
for config in configs_found:
self.export(config)
return configs_found
# Dump a scenario given by the index in the result
def export(self, config_idx):
this_folder = os.path.join(self._result_folder, "ConfigIdx_"+str(config_idx))
Path(this_folder).mkdir(parents=True, exist_ok=True)
self.render_video(config_idx, this_folder)
self.write_trajectory(config_idx, this_folder)
self.write_scenario_parameter(config_idx, this_folder)
self.write_behavior_parameter(config_idx, this_folder)
self.write_map(config_idx, self._result_folder)
# Write video
def render_video(self, config_idx, folder):
viewer = MPViewer(
params=ParameterServer(),
center= [375, 0],
enforce_x_length=True,
x_length = 100.0,
use_world_bounds=True)
video_exporter = VideoRenderer(renderer=viewer, world_step_time=0.2)
super().visualize(viewer=video_exporter, configs_idx_list=[config_idx], \
real_time_factor=10, fontsize=6)
video_exporter.export_video(filename=os.path.join(folder,"video"), \
remove_image_dir = True)
# Collect all points from the trajectories of all agents.
# Note we use the historic states of the last world
def write_trajectory(self, config_idx, folder):
cols = ['agent_id','time','x','y','theta','v']
table = []
histories = super().get_benchmark_result().get_history(config_idx)
if histories is None:
logging.warning("No historic state saved, cannot dump trajetory")
return
scenario = histories[-1]  # the last state includes all the historic states
world = scenario.GetWorldState()
for (agent_id, agent) in world.agents.items():
for state_action in agent.history:
state = state_action[0]
table.append([agent_id, state[0], state[1], state[2], state[3], state[4]])
np_table = np.array(table)
df = | pd.DataFrame(np_table, columns=cols) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
from fbprophet import Prophet
import os
import json
# In[2]:
import inspect
def get_default_args(func):
signature = inspect.signature(func)
return {
k: v.default
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty
}
prophet_defaults = get_default_args(Prophet)
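# Hedged illustration: the resulting dict maps constructor keyword names to their
# defaults, e.g. roughly {'growth': 'linear', 'changepoints': None, ...}; the exact
# contents depend on the installed fbprophet version.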
# In[3]:
hierarchy = {"total": ["aa", "ca", "ac", "cc", "dc", "da"]}
# In[4]:
df_path = "../data/df_add_lookup.csv"
df = pd.read_csv(df_path)
shift_cols = list(filter(lambda x: x.startswith("Customers") or x.startswith("Sales"),df.columns.tolist()))
for col in shift_cols:
df[col] = df[col].shift(1)
#df = df.dropna()
df = df.fillna(0)
if "Date" in df.columns.tolist():
df.index = | pd.to_datetime(df["Date"]) | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# # Computing integral with quasi-Monte Carlo methods
#
# **<NAME>, PhD**
#
# This demo is based on the original Matlab demo accompanying the <a href="https://mitpress.mit.edu/books/applied-computational-economics-and-finance">Computational Economics and Finance</a> 2001 textbook by <NAME> and <NAME>.
#
# Original (Matlab) CompEcon file: **demqua01bis.m**
#
# Running this file requires the Python version of CompEcon. This can be installed with pip by running
#
# !pip install compecon --upgrade
#
# <i>Last updated: 2021-Oct-01</i>
# <hr>
# ## About
# To seven significant digits,
# \begin{align*}
# A &=\int_{-1}^1\int_{-1}^1 e^{-x_1}\cos^2(x_2)dx _1dx_2\\
# &=\int_{-1}^1 e^{-x_1} dx _1 \times \int_{-1}^1 \cos^2(x_2) dx_2\\
# &=\left(e - \tfrac{1}{e}\right) \times \left(1+\tfrac{1}{2}\sin(2)\right)\\
# &\approx 3.4190098
# \end{align*}
# ## Initial tasks
# In[1]:
import numpy as np
from compecon import qnwequi
import pandas as pd
# ### Make support function
# In[2]:
f1 = lambda x1: np.exp(-x1)
f2 = lambda x2: np.cos(x2)**2
f = lambda x1, x2: f1(x1) * f2(x2)
# In[3]:
def quad(method, n):
(x1, x2), w = qnwequi(n,[-1, -1], [1, 1],method)
return w.dot(f(x1, x2))
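# Descriptive note (an assumption about the CompEcon API, not stated in the demo):
# qnwequi is expected to return n equidistributed nodes on [-1, 1] x [-1, 1] together
# with weights summing to the area of the box, so w.dot(f(x1, x2)) is the
# quasi-Monte Carlo estimate of the double integral A.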
# ## Compute the approximation errors
# In[4]:
nlist = range(3,7)
quadmethods = ['Random', 'Niederreiter', 'Weyl']
f_quad = np.array([[quad(qnw[0], 10**ni) for qnw in quadmethods] for ni in nlist])
f_true = (np.exp(1) - np.exp(-1)) * (1+0.5*np.sin(2))
f_error = np.log10(np.abs(f_quad/f_true - 1))
# ## Make table with results
# In[5]:
results = | pd.DataFrame(f_error, columns=quadmethods) | pandas.DataFrame |
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import shap
import xgboost as xgb
from sqlflow_submitter import explainer
from sqlflow_submitter.db import (buffered_db_writer, connect_with_data_source,
db_generator)
def xgb_shap_dataset(datasource, select, feature_column_names, label_spec,
feature_specs):
label_spec["feature_name"] = label_spec["name"]
conn = connect_with_data_source(datasource)
stream = db_generator(conn.driver, conn, select, feature_column_names,
label_spec, feature_specs)
xs = | pd.DataFrame(columns=feature_column_names) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 24 22:18:50 2022
@author: oiseth
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 16 22:09:00 2021
@author: oiseth
"""
import numpy as np
from scipy import signal as spsp
from matplotlib import pyplot as plt
from copy import deepcopy
import pandas as pd
import os
__all__ = ["StaticCoeff",]
class StaticCoeff:
"""
A class used to represent static force coefficients of a bridge deck
Attributes:
-----------
drag_coeff : float
drag coefficient (normalized drag force).
lift_coeff : float
lift coefficient (normalized lift force).
pitch_coeff : float
pitching moment coefficient (normalized pitching moment).
pitch_motion : float
pitch motion used in the wind tunnel tests.
Methods:
........
fromWTT()
obtains the static coefficients from a wind tunnel test.
plot_drag()
plots the drag coefficient as a function of the pitching motion
plot_lift()
plots the lift coefficient as a function of the pitching motion
plot_pitch()
plots the pitching moment coefficient as a function of the pitching motion
"""
def __init__(self,drag_coeff,lift_coeff,pitch_coeff,pitch_motion,mean_wind=[]):
"""
parameters:
-----------
drag_coeff : float
drag coefficient (normalized drag force).
lift_coeff : float
lift coefficient (normalized lift force).
pitch_coeff : float
pitching moment coefficient (normalized pitching moment).
pitch_motion : float
pitch motion used in the wind tunnel tests.
mean_wind : float
the mean wind in which the static coefficients have been obtained
"""
self.drag_coeff = drag_coeff
self.lift_coeff = lift_coeff
self.pitch_coeff = pitch_coeff
self.pitch_motion = pitch_motion
self.mean_wind = mean_wind
@classmethod
def fromWTT(cls,experiment_in_still_air,experiment_in_wind,section_width,section_height,section_length ):
""" fromWTT obtains an instance of the class StaticCoeff
parameters:
----------
experiment_in_still_air : instance of the class experiment
experiment_in_wind : instance of the class experiment
section_width : width of the bridge deck section model
section_height : height of the bridge deck section model
section_length : length of the bridge deck section model
returns:
-------
instance of the class StaticCoeff
"""
experiment_in_wind.align_with(experiment_in_still_air)
experiment_in_wind_still_air_forces_removed = deepcopy(experiment_in_wind)
experiment_in_wind_still_air_forces_removed.substract(experiment_in_still_air)
filter_order =6
cutoff_frequency = 1.0
sampling_frequency = 1/(experiment_in_still_air.time[1]-experiment_in_still_air.time[0])
sos = spsp.butter(filter_order,cutoff_frequency, fs=sampling_frequency, output="sos")
filtered_wind = np.mean(spsp.sosfiltfilt(sos,experiment_in_wind_still_air_forces_removed.wind_speed))
drag_coeff = experiment_in_wind_still_air_forces_removed.forces_global_center[:,0:24:6]*2/experiment_in_wind_still_air_forces_removed.air_density/filtered_wind**2/section_height/section_length
lift_coeff = experiment_in_wind_still_air_forces_removed.forces_global_center[:,2:24:6]*2/experiment_in_wind_still_air_forces_removed.air_density/filtered_wind**2/section_width/section_length
pitch_coeff = experiment_in_wind_still_air_forces_removed.forces_global_center[:,4:24:6]*2/experiment_in_wind_still_air_forces_removed.air_density/filtered_wind**2/section_width**2/section_length
pitch_motion = experiment_in_wind_still_air_forces_removed.motion[:,2]
return cls(drag_coeff,lift_coeff,pitch_coeff,pitch_motion,filtered_wind)
def to_excel(self,section_name,sheet_name='Wind speed #' ,section_width=0,section_height=0,section_length=0):
"""
Parameters
----------
section_name : string
section name.
sheet_name : string
name of the excel sheet that the data is stored in, optional
section_width : float64, optional
Width of the section model. The default is 0.
section_height : float64, optional
Height of the section model. The default is 0.
section_length : float64, optional
Length of the section model. The default is 0.
Returns
-------
None.
"""
static_coeff = pd.DataFrame({"pitch motion": self.pitch_motion[0:-1:10],
"C_D": np.sum(self.drag_coeff[0:-1:10],axis=1),
"C_L": np.sum(self.lift_coeff[0:-1:10],axis=1),
"C_m": np.sum(self.pitch_coeff[0:-1:10],axis=1),
})
geometry = pd.DataFrame({"D": [section_height],
"B": [section_width],
"L": [section_length]
})
if os.path.exists("Static_coeff_" + section_name + '.xlsx')==True:
with pd.ExcelWriter("Static_coeff_" + section_name + '.xlsx',mode="a",engine="openpyxl",if_sheet_exists="replace") as writer:
geometry.to_excel(writer, sheet_name="Dim section model")
static_coeff.to_excel(writer, sheet_name=sheet_name)
else:
with | pd.ExcelWriter("Static_coeff_" + section_name + '.xlsx') | pandas.ExcelWriter |
#!/usr/bin/python
# <NAME>, <EMAIL>
# v1.0, 09/13/2021
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import mannwhitneyu, norm, kruskal, spearmanr
from scipy.optimize import minimize_scalar
import scikit_posthocs as sp
from statsmodels.stats.multitest import fdrcorrection
from sklearn import metrics
targets = ['Healthy', 'ARPC', 'Luminal', 'NEPC', 'Basal', 'Patient', 'Gray', 'AMPC', 'MIX']
colors = ['#009988', '#0077BB', '#33BBEE', '#CC3311', '#EE7733', '#EE3377', '#BBBBBB', '#FFAE42', '#9F009F']
palette = {targets[i]: colors[i] for i in range(len(targets))}
interest_genes = ['AR', 'ASCL1', 'FOXA1', 'HOXB13', 'NKX3-1', 'REST', 'PGR', 'SOX2', 'ONECUT2', 'MYOG', 'MYF5']
sns.set(font_scale=1.5)
sns.set_style('ticks')
def fraction_plots(ref_dict, full_df, name):
features = list(ref_dict.keys())
# labels = pd.read_table(name + '/' + name + '_beta-predictions.tsv', sep='\t', index_col=0)
# full_df = pd.merge(labels, full_df, left_index=True, right_index=True)
# normalize = Normalize(0, 1)
# cmap = LinearSegmentedColormap.from_list('', ['#CC3311', '#9F009F', '#0077BB'])
for feature in features:
df = pd.concat([full_df['TFX'], full_df['Subtype'], full_df.filter(regex=feature)], axis=1)
x_arpc, y_arpc = df.loc[df['Subtype'] == 'ARPC', 'TFX'].values, df.loc[df['Subtype'] == 'ARPC', feature].values
r_val_arpc, p_val_arpc = spearmanr(x_arpc, y_arpc)
m_arpc, b_arpc = np.polyfit(x_arpc, y_arpc, 1)
x_nepc, y_nepc = df.loc[df['Subtype'] == 'NEPC', 'TFX'].values, df.loc[df['Subtype'] == 'NEPC', feature].values
r_val_nepc, p_val_nepc = spearmanr(x_nepc, y_nepc)
m_nepc, b_nepc = np.polyfit(x_nepc, y_nepc, 1)
plt.figure(figsize=(8, 8))
sns.scatterplot(x='TFX', y=feature, hue='Subtype', data=df, alpha=0.8, palette=palette, s=300)
plt.plot(x_arpc, m_arpc * x_arpc + b_arpc, lw=2, color=palette['ARPC'])
plt.plot(x_nepc, m_nepc * x_nepc + b_nepc, lw=2, color=palette['NEPC'])
# scalarmappaple = cm.ScalarMappable(norm=normalize, cmap=cmap)
# scalarmappaple.set_array(df.values)
# plt.colorbar(scalarmappaple, )
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.title(feature + ' vs Tumor Fraction' +
'\n ARPC: Spearman = ' + "{:e}".format(r_val_arpc) + ', p-val = ' + "{:e}".format(p_val_arpc) +
'\n NEPC: Spearman = ' + "{:e}".format(r_val_nepc) + ', p-val = ' + "{:e}".format(p_val_nepc))
plt.savefig(name + '/' + feature + '_vsTFX.pdf', bbox_inches="tight")
plt.close()
def dist_plots(full_df, name):
features = list(set([item for item in list(full_df.columns) if '_' in item]))
for feature_label in features:
# format df for seaborn
subs_key = full_df['Subtype']
df = full_df.filter(regex=feature_label).transpose().melt()
df = pd.merge(subs_key, df, left_index=True, right_on='variable')
# histogram:
# plt.figure(figsize=(8, 8))
# sns.histplot(x='value', hue='Subtype', data=df, palette=palette, element="step")
# plt.xlabel(feature_label)
# plt.ylabel('Counts')
# plt.title(feature_label + ' Histogram', size=14)
# plt.savefig(name + '/' + feature_label + '_Histogram.pdf', bbox_inches="tight")
# plt.close()
# density plot
plt.figure(figsize=(8, 8))
sns.kdeplot(x='value', hue='Subtype', data=df, palette=palette, fill=True, common_norm=False)
plt.xlabel(feature_label)
plt.ylabel('Density')
plt.title(feature_label + ' Kernel Density Estimation', size=14)
plt.savefig(name + '/' + feature_label + '_Density.pdf', bbox_inches="tight")
plt.close()
def box_plots(df, name):
df = df[df.columns.drop(list(df.filter(regex='Window')))]
df = df.reindex(sorted(df.columns), axis=1)
df = df.melt(id_vars='Subtype', var_name='Feature', value_name='Value', ignore_index=False)
plt.figure(figsize=(12, 8))
ax = sns.boxplot(x='Feature', y='Value', hue='Subtype', data=df, palette=palette)
plt.setp(ax.get_xticklabels(), rotation=45)
plt.ylabel('Counts')
plt.title(name + ' Feature Distributions', size=14)
plt.savefig(name + '/' + name + '_BoxPlot.pdf', bbox_inches="tight")
plt.close()
def dist_plots_sample(full_df, name):
features = list(set([item for item in list(full_df.columns) if '_' in item]))
for feature_label in features:
# format df for seaborn
subs_key = full_df['Subtype']
df = full_df.filter(regex=feature_label).transpose().melt()
df = pd.merge(subs_key, df, left_index=True, right_on='variable')
print(df)
# plot_range = [0, 2 * np.mean(df['value'])]
for key in subs_key:
# density plot
plt.figure(figsize=(8, 8))
sns.kdeplot(x='value', hue='variable', data=df[df['Subtype'] == key], fill=True, common_norm=False)
plt.xlabel(feature_label)
plt.ylabel('Density')
plt.title(key + ' ' + name + ' Kernel Density Estimation', size=14)
plt.savefig(name + '/' + name + '_' + feature_label + '_Sample-Wise_'
+ key + '_Density.pdf', bbox_inches="tight")
plt.close()
def diff_exp_tw(df, name, thresh=0.05, sub_name=''):
print('Conducting three-way differential expression analysis . . .')
types = list(df.Subtype.unique())
df_t1 = df.loc[df['Subtype'] == types[0]].drop('Subtype', axis=1)
df_t2 = df.loc[df['Subtype'] == types[1]].drop('Subtype', axis=1)
df_t3 = df.loc[df['Subtype'] == types[2]].drop('Subtype', axis=1)
df_lpq = pd.DataFrame(index=df_t1.transpose().index, columns=['p-value', 'DunnSigPairs'])
for roi in list(df_t1.columns):
x, y, z = df_t1[roi].values, df_t2[roi].values, df_t3[roi].values
if np.count_nonzero(~np.isnan(x)) < 2 or np.count_nonzero(~np.isnan(y)) < 2 or np.count_nonzero(~np.isnan(z)) < 2:
continue
try:
kw_score = kruskal(x, y, z, nan_policy='omit')[1]
except ValueError:
continue
df_lpq.at[roi, 'p-value'] = kw_score
if kw_score < thresh:
pairs = 0
dunn_scores = sp.posthoc_dunn([x, y, z])
if dunn_scores[1][2] < thresh:
pairs += 1
if dunn_scores[1][3] < thresh:
pairs += 1
if dunn_scores[2][3] < thresh:
pairs += 1
df_lpq.at[roi, 'DunnSigPairs'] = pairs
else:
df_lpq.at[roi, 'DunnSigPairs'] = 0
# now calculate p-adjusted (Benjamini-Hochberg corrected p-values)
df_lpq = df_lpq.dropna(how='all')
df_lpq['p-adjusted'] = fdrcorrection(df_lpq['p-value'])[1]
df_lpq = df_lpq.infer_objects()
df_lpq = df_lpq.sort_values(by=['p-adjusted'])
df_lpq.to_csv(name + '/' + name + sub_name + '_three-way_rpq.tsv', sep="\t")
features = list(df_lpq[(df_lpq['p-adjusted'] < thresh) & (df_lpq['DunnSigPairs'] == 3)].index)
with open(name + '/' + name + sub_name + '_three-way_FeatureList.tsv', 'w') as f_output:
for item in features:
f_output.write(item + '\n')
return pd.concat([df.iloc[:, :1], df.loc[:, df.columns.isin(features)]], axis=1, join='inner')
def diff_exp(df, name, thresh=0.05, sub_name=''):
print('Conducting differential expression analysis . . .')
types = list(df.Subtype.unique())
df_t1 = df.loc[df['Subtype'] == types[0]].drop('Subtype', axis=1)
df_t2 = df.loc[df['Subtype'] == types[1]].drop('Subtype', axis=1)
df_lpq = pd.DataFrame(index=df_t1.transpose().index, columns=['ratio', 'p-value'])
for roi in list(df_t1.columns):
x, y = df_t1[roi].values, df_t2[roi].values
if np.count_nonzero(~np.isnan(x)) < 2 or np.count_nonzero(~np.isnan(y)) < 2:
continue
df_lpq.at[roi, 'ratio'] = np.mean(x)/np.mean(y)
df_lpq.at[roi, 'p-value'] = mannwhitneyu(x, y)[1]
# now calculate p-adjusted (Benjamini-Hochberg corrected p-values)
df_lpq['p-adjusted'] = fdrcorrection(df_lpq['p-value'])[1]
df_lpq = df_lpq.sort_values(by=['p-adjusted'])
df_lpq = df_lpq.infer_objects()
df_lpq.to_csv(name + '/' + name + sub_name + '_rpq.tsv', sep="\t")
features = list(df_lpq[(df_lpq['p-adjusted'] < thresh)].index)
with open(name + '/' + name + sub_name + '_FeatureList.tsv', 'w') as f_output:
for item in features:
f_output.write(item + '\n')
return pd.concat([df.iloc[:, :1], df.loc[:, df.columns.isin(features)]], axis=1, join='inner')
def metric_analysis(df, name):
print('Calculating metric dictionary . . .')
df = df.dropna(axis=1)
features = list(df.iloc[:, 1:].columns)
types = list(df.Subtype.unique())
mat = {}
for feature in features:
sub_df = pd.concat([df.iloc[:, :1], df[[feature]]], axis=1, join='inner')
mat[feature] = {'Feature': feature}
for subtype in types:
mat[feature][subtype + '_Mean'] = np.nanmean(
sub_df[sub_df['Subtype'] == subtype].iloc[:, 1:].to_numpy().flatten())
mat[feature][subtype + '_Std'] = np.nanstd(
sub_df[sub_df['Subtype'] == subtype].iloc[:, 1:].to_numpy().flatten())
pd.DataFrame(mat).to_csv(name + '/' + name + '_weights.tsv', sep="\t")
return mat
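# Sketch of the returned structure (the feature name below is hypothetical):
#   mat['AR_TSS_central-depth'] = {'Feature': 'AR_TSS_central-depth',
#                                  'ARPC_Mean': ..., 'ARPC_Std': ...,
#                                  'NEPC_Mean': ..., 'NEPC_Std': ...,
#                                  'Healthy_Mean': ..., 'Healthy_Std': ...}
# i.e. one mean/std pair per subtype present in the input frame.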
def gaussian_mixture_model(ref_dict, df, subtypes, name):
print('Running Gaussian Mixture Model Predictor on ' + name + ' . . . ')
features = list(ref_dict.keys())
samples = list(df.index)
predictions = pd.DataFrame(0, index=df.index, columns=['LR', 'Prediction'])
# latents = [0.5, 0.5]
for sample in samples:
tfx = df.loc[sample, 'TFX']
score_mat = pd.DataFrame(1, index=features, columns=[subtypes[0], subtypes[1], 'LR'])
for feature in features:
try:
feature_val = df.loc[sample, feature]
except KeyError:
continue
exp_a = tfx * ref_dict[feature][subtypes[0] + '_Mean'] + (1 - tfx) * ref_dict[feature]['Healthy_Mean']
std_a = np.sqrt(tfx * np.square(ref_dict[feature][subtypes[0] + '_Std']) +
(1 - tfx) * np.square(ref_dict[feature]['Healthy_Std']))
exp_b = tfx * ref_dict[feature][subtypes[1] + '_Mean'] + (1 - tfx) * ref_dict[feature]['Healthy_Mean']
std_b = np.sqrt(tfx * np.square(ref_dict[feature][subtypes[1] + '_Std']) +
(1 - tfx) * np.square(ref_dict[feature]['Healthy_Std']))
range_a = [exp_a - 3 * std_a, exp_a + 3 * std_a]
range_b = [exp_b - 3 * std_b, exp_b + 3 * std_b]
range_min, range_max = [min([item for sublist in [range_a, range_b] for item in sublist]),
max([item for sublist in [range_a, range_b] for item in sublist])]
pdf_a = norm.pdf(feature_val, loc=exp_a, scale=std_a)
pdf_b = norm.pdf(feature_val, loc=exp_b, scale=std_b)
if np.isnan(pdf_a) or np.isnan(pdf_b) or pdf_a == 0 or pdf_b == 0\
or np.isinf(pdf_a) or np.isinf(pdf_b) or not range_min < feature_val < range_max:
pdf_a = 1
pdf_b = 1
# score_mat.loc[feature, subtypes[0]] = pdf_a
# score_mat.loc[feature, subtypes[1]] = pdf_b
score_mat.loc[feature, 'LR'] = np.log(pdf_a / pdf_b)
# plot features for specific samples
# if sample in ['FH0200_E_2_A', 'FH0312_E_1_A', 'FH0486_E_2_A']:
# plt.figure(figsize=(8, 8))
# x = np.linspace(range_min, range_max, 100)
# plt.plot(x, norm.pdf(x, exp_a, std_a), c=colors[1], label='Shifted ARPC')
# plt.plot(x, norm.pdf(x, exp_b, std_b), c=colors[3], label='Shifted NEPC')
# plt.axvline(x=feature_val, c=colors[5], label='Sample Value')
# plt.ylabel('Density')
# plt.xlabel(feature)
# plt.legend()
# plt.title(sample + ' ' + feature + ' Shifted Curves and Sample Value', size=14)
# plt.savefig(name + '/' + name + '_' + sample + '_' + feature + '.pdf', bbox_inches="tight")
# plt.close()
# with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print(score_mat)
# gamma_a = score_mat[subtypes[0]].product(axis=0) * latents[0]
# gamma_b = score_mat[subtypes[1]].product(axis=0) * latents[1]
# marginal = gamma_a + gamma_b
# print(str(gamma_a) + '\t' + str(gamma_b) + '\t' + str(marginal))
# predictions.loc[sample, subtypes[0]] = gamma_a / marginal
# predictions.loc[sample, subtypes[1]] = gamma_b / marginal
# predictions.loc[sample, 'LR'] = np.log(gamma_a) - np.log(gamma_b)
predictions.loc[sample, 'LR'] = score_mat['LR'].sum(axis=0)
if predictions.loc[sample, 'LR'] > 2.3:
predictions.loc[sample, 'Prediction'] = subtypes[0]
elif predictions.loc[sample, 'LR'] < -2.3:
predictions.loc[sample, 'Prediction'] = subtypes[1]
else:
predictions.loc[sample, 'Prediction'] = 'Indeterminate'
predictions.to_csv(name + '/' + name + '_predictions.tsv', sep="\t")
# print('Predictions:')
# print(predictions)
def gaussian_mixture_model_v2(ref_dict, df, subtypes, name):
print('Running Gaussian Mixture Model Predictor (non-binary) on ' + name + ' . . . ')
features = list(ref_dict.keys())
samples = list(df.index)
predictions = pd.DataFrame(0, index=df.index, columns=[subtypes[0], subtypes[1], 'Prediction'])
for sample in samples:
tfx = df.loc[sample, 'TFX']
score_mat = pd.DataFrame(1, index=features, columns=[subtypes[0], subtypes[1]])
for feature in features:
try:
feature_val = df.loc[sample, feature]
except KeyError:
continue
exp_a = tfx * ref_dict[feature][subtypes[0] + '_Mean'] + (1 - tfx) * ref_dict[feature]['Healthy_Mean']
std_a = np.sqrt(tfx * np.square(ref_dict[feature][subtypes[0] + '_Std']) +
(1 - tfx) * np.square(ref_dict[feature]['Healthy_Std']))
exp_b = tfx * ref_dict[feature][subtypes[1] + '_Mean'] + (1 - tfx) * ref_dict[feature]['Healthy_Mean']
std_b = np.sqrt(tfx * np.square(ref_dict[feature][subtypes[1] + '_Std']) +
(1 - tfx) * np.square(ref_dict[feature]['Healthy_Std']))
range_a = [exp_a - 3 * std_a, exp_a + 3 * std_a]
range_b = [exp_b - 3 * std_b, exp_b + 3 * std_b]
range_min, range_max = [min([item for sublist in [range_a, range_b] for item in sublist]),
max([item for sublist in [range_a, range_b] for item in sublist])]
pdf_a = norm.pdf(feature_val, loc=exp_a, scale=std_a)
pdf_b = norm.pdf(feature_val, loc=exp_b, scale=std_b)
pdf_healthy = norm.pdf(feature_val, loc=ref_dict[feature]['Healthy_Mean'], scale=ref_dict[feature]['Healthy_Std'])
if np.isnan(pdf_a) or np.isnan(pdf_healthy) or pdf_a == 0 or pdf_healthy == 0\
or np.isinf(pdf_a) or np.isinf(pdf_healthy) or not range_min < feature_val < range_max:
score_mat.loc[feature, subtypes[0]] = 0
else:
score_mat.loc[feature, subtypes[0]] = np.log(pdf_a / pdf_healthy)
if np.isnan(pdf_b) or np.isnan(pdf_healthy) or pdf_b == 0 or pdf_healthy == 0\
or np.isinf(pdf_b) or np.isinf(pdf_healthy) or not range_min < feature_val < range_max:
score_mat.loc[feature, subtypes[1]] = 0
else:
score_mat.loc[feature, subtypes[1]] = np.log(pdf_b / pdf_healthy)
predictions.loc[sample, subtypes[0]] = score_mat[subtypes[0]].sum(axis=0)
predictions.loc[sample, subtypes[1]] = score_mat[subtypes[1]].sum(axis=0)
ar_score = predictions.loc[sample, subtypes[0]]
ne_score = predictions.loc[sample, subtypes[1]]
if ar_score > 2.3 and ar_score > 2 * ne_score:
predictions.loc[sample, 'Prediction'] = subtypes[0]
elif ne_score > 2.3 and ne_score > 2 * ar_score:
predictions.loc[sample, 'Prediction'] = subtypes[1]
elif ar_score > 2.3 and ne_score > 2.3:
predictions.loc[sample, 'Prediction'] = 'Amphicrine'
else:
predictions.loc[sample, 'Prediction'] = 'Indeterminate/DNPC'
predictions.to_csv(name + '/' + name + '_categorical-predictions.tsv', sep="\t")
def Find_Optimal_Cutoff(target, predicted):
""" Find the optimal probability cutoff point for a classification model related to event rate
Parameters
----------
target : Matrix with dependent or target data, where rows are observations
predicted : Matrix with predicted data, where rows are observations
Returns
-------
list type, with optimal cutoff value
"""
fpr, tpr, threshold = metrics.roc_curve(target, predicted)
i = np.arange(len(tpr))
roc = pd.DataFrame({'tf': pd.Series(tpr - (1 - fpr), index=i), 'threshold': pd.Series(threshold, index=i)})
roc_t = roc.iloc[(roc.tf - 0).abs().argsort()[:1]]
return list(roc_t['threshold'])
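# Hedged usage sketch (variable names are illustrative): the returned threshold is
# the point where tpr is closest to 1 - fpr, i.e. where sensitivity and specificity
# roughly balance, e.g.
#   y_true = pd.factorize(preds['Subtype'].values)[0]
#   thresh = Find_Optimal_Cutoff(y_true, preds['NEPC'].values)[0]
#   sens_spec = specificity_sensitivity(y_true, preds['NEPC'].values, thresh)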
def specificity_sensitivity(target, predicted, threshold):
thresh_preds = np.zeros(len(predicted))
thresh_preds[predicted > threshold] = 1
cm = metrics.confusion_matrix(target, thresh_preds)
return cm[1, 1] / (cm[1, 0] + cm[1, 1]), cm[0, 0] / (cm[0, 0] + cm[0, 1])
def nroc_curve(y_true, predicted, num_thresh=100):
step = 1/num_thresh
thresholds = np.arange(0, 1 + step, step)
fprs, tprs = [], []
for threshold in thresholds:
y_pred = np.where(predicted >= threshold, 1, 0)
fp = np.sum((y_pred == 1) & (y_true == 0))
tp = np.sum((y_pred == 1) & (y_true == 1))
fn = np.sum((y_pred == 0) & (y_true == 1))
tn = np.sum((y_pred == 0) & (y_true == 0))
fprs.append(fp / (fp + tn))
tprs.append(tp / (tp + fn))
return fprs, tprs, thresholds
def beta_descent(ref_dict, df, subtypes, name, eval, order=None, base_df=None):
print('Running Heterogeneous Beta Predictor on ' + name + ' . . . ')
if not os.path.exists(name + '/'):
os.makedirs(name + '/')
features = list(ref_dict.keys())
cols = subtypes
cols.append('Prediction')
samples = list(df.index)
if eval == 'Bar':
predictions = pd.DataFrame(0, index=df.index, columns=[subtypes[0], subtypes[1], 'TFX', 'Prediction', 'Depth',
subtypes[0] + '_PLL', subtypes[1] + '_PLL', 'JPLL'])
feature_pdfs = pd.DataFrame(columns=['Sample', 'TFX', 'Feature', 'Value',
subtypes[0] + '_s-mean', subtypes[1] + '_s-mean',
subtypes[0] + '_s-std', subtypes[1] + '_s-std',
subtypes[0] + '_pdf', subtypes[1] + '_pdf'])
else:
predictions = pd.DataFrame(0, index=df.index, columns=[subtypes[0], subtypes[1], 'TFX', 'Prediction',
subtypes[0] + '_PLL', subtypes[1] + '_PLL', 'JPLL'])
# predictions['Subtype'] = df['Subtype']
# predictions['Subtype'] = df['NEPC']
predictions['Subtype'] = 'Unknown'
i = 0
for sample in samples:
tfx = df.loc[sample, 'TFX']
pdf_set_a, pdf_set_b = [], []
if base_df is not None: # recompute reference dictionary without samples
if eval == 'Triplet':
sample_comp_1 = sample.split('_')[0] + '_LuCaP'
sample_comp_2 = sample.split('_')[1] + '_LuCaP'
ref_dict = metric_analysis(base_df.drop([sample_comp_1, sample_comp_2]), name)
else:
sample_comp = sample.split('_')[0] + '_LuCaP'
ref_dict = metric_analysis(base_df.drop(sample_comp), name)
for feature in features:
try:
feature_val = df.loc[sample, feature]
except KeyError:
continue
exp_a = tfx * ref_dict[feature][subtypes[0] + '_Mean'] + (1 - tfx) * ref_dict[feature]['Healthy_Mean']
std_a = np.sqrt(tfx * np.square(ref_dict[feature][subtypes[0] + '_Std']) +
(1 - tfx) * np.square(ref_dict[feature]['Healthy_Std']))
exp_b = tfx * ref_dict[feature][subtypes[1] + '_Mean'] + (1 - tfx) * ref_dict[feature]['Healthy_Mean']
std_b = np.sqrt(tfx * np.square(ref_dict[feature][subtypes[1] + '_Std']) +
(1 - tfx) * np.square(ref_dict[feature]['Healthy_Std']))
pdf_a = norm.pdf(feature_val, loc=exp_a, scale=std_a)
pdf_b = norm.pdf(feature_val, loc=exp_b, scale=std_b)
if np.isfinite(pdf_a) and np.isfinite(pdf_b) and pdf_a != 0 and pdf_b != 0:
pdf_set_a.append(pdf_a)
pdf_set_b.append(pdf_b)
# feature_pdfs.loc[i] = [sample, tfx, feature, feature_val, exp_a, exp_b, std_a, std_b, pdf_a, pdf_b]
i += 1
def objective(theta):
log_likelihood = 0
for val_1, val_2 in zip(pdf_set_a, pdf_set_b):
joint_pdf = theta * val_1 + (1 - theta) * val_2
if joint_pdf > 0:
log_likelihood += np.log(joint_pdf)
return -1 * log_likelihood
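# Descriptive note: theta is the per-sample mixture weight on the first subtype;
# each feature contributes log(theta * pdf_a + (1 - theta) * pdf_b), and
# minimize_scalar below minimizes the negated sum, i.e. it finds the
# maximum-likelihood mixture weight on [0, 1].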
def final_pdf(final_weight):
log_likelihood_a, log_likelihood_b, jpdf = 0, 0, 0
for val_1, val_2 in zip(pdf_set_a, pdf_set_b):
joint_a, joint_b = final_weight * val_1, (1 - final_weight) * val_2
joint_pdf = final_weight * val_1 + (1 - final_weight) * val_2
if joint_a > 0:
log_likelihood_a += np.log(joint_a)
if joint_b > 0:
log_likelihood_b += np.log(joint_b)
if joint_pdf > 0:
jpdf += np.log(joint_pdf)
return log_likelihood_a, log_likelihood_b, jpdf
weight_1 = minimize_scalar(objective, bounds=(0, 1), method='bounded').x
final_pdf_a, final_pdf_b, final_jpdf = final_pdf(weight_1)
predictions.loc[sample, 'TFX'] = tfx
if eval == 'Bar':
predictions.loc[sample, 'Depth'] = df.loc[sample, 'Depth']
predictions.loc[sample, 'JPLL'] = final_jpdf
predictions.loc[sample, subtypes[0]], predictions.loc[sample, subtypes[1]] = np.round(weight_1, 4), np.round(1 - weight_1, 4)
predictions.loc[sample, subtypes[0] + '_PLL'], predictions.loc[sample, subtypes[1] + '_PLL'] = final_pdf_a, final_pdf_b
if predictions.loc[sample, subtypes[0]] > 0.9:
predictions.loc[sample, 'Prediction'] = subtypes[0]
elif predictions.loc[sample, subtypes[0]] < 0.1:
predictions.loc[sample, 'Prediction'] = subtypes[1]
elif predictions.loc[sample, subtypes[0]] > 0.5:
predictions.loc[sample, 'Prediction'] = 'Mixed_' + subtypes[0]
else:
predictions.loc[sample, 'Prediction'] = 'Mixed_' + subtypes[1]
predictions.to_csv(name + '/' + name + '_beta-predictions.tsv', sep="\t")
# feature_pdfs.to_csv(name + '/' + name + '_feature-values_pdfs.tsv', sep="\t")
# if eval == 'Bar': # for benchmarking
# depths = ['0.2X', '1X', '25X']
# bench_targets = [0.01, 0.03, 0.05, 0.1, 0.2, 0.3]
# # bench_colors = ['#1c9964', '#4b9634', '#768d00', '#a47d00', '#d35e00', '#ff0000']
# # bench_palette = {bench_targets[i]: bench_colors[i] for i in range(len(bench_targets))}
# df_bar = pd.DataFrame(columns=['Depth', 'TFX', 'AUC'])
# for depth in depths:
# for category in bench_targets:
# sub_df = predictions.loc[predictions['TFX'] == category]
# sub_df = sub_df.loc[sub_df['Depth'] == depth]
# y = pd.factorize(sub_df['Subtype'].values)[0]
# fpr, tpr, threshold = metrics.roc_curve(y, sub_df['NEPC'].values)
# roc_auc = metrics.auc(fpr, tpr)
# df_bar.loc[len(df_bar.index)] = [depth, category, roc_auc]
# plt.figure(figsize=(12, 8))
# sns.barplot(x='TFX', y='AUC', hue='Depth', data=df_bar)
# plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# plt.savefig(name + '/' + 'AUCBarPlot.pdf', bbox_inches="tight")
# plt.close()
# df_bar.to_csv(name + '/' + 'AUCList.tsv', sep="\t")
if eval == 'Bar': # for benchmarking
depths = ['0.2X', '1X', '25X']
bench_targets = [0.01, 0.03, 0.05, 0.1, 0.2, 0.3]
predictions = predictions[predictions['TFX'] != 0.03]
for depth in depths:
df = predictions.loc[predictions['Depth'] == depth]
plt.figure(figsize=(8, 8))
# sns.boxplot(x='TFX', y='NEPC', hue='Subtype', data=df, order=bench_targets, boxprops=dict(alpha=.3), palette=palette)
sns.swarmplot(x='TFX', y='NEPC', hue='Subtype', palette=palette, data=df, s=10, alpha=0.8, dodge=False)
plt.ylabel('NEPC Score')
plt.xlabel('Tumor Fraction')
plt.title('Benchmarking Scores at ' + depth, size=14, y=1.1)
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.savefig(name + '/' + depth + '_BoxPlot.pdf', bbox_inches="tight")
plt.close()
if eval == 'SampleBar':
import matplotlib.cm as cm
from matplotlib.colors import LinearSegmentedColormap
if order is not None:
predictions = predictions.reindex(order.index)
predictions = predictions.sort_values('NEPC')
predictions['NEPC'] = predictions['NEPC'] - 0.3314
data = predictions.groupby(predictions['NEPC']).size()
cmap = LinearSegmentedColormap.from_list('', ['#0077BB', '#CC3311'])
cm.register_cmap("mycolormap", cmap)
if order is not None:
predictions = predictions.reindex(order.index)
pal = sns.color_palette("mycolormap", len(data))
sns.set_context(rc={'patch.linewidth': 0.0})
plt.figure(figsize=(3, 2))
g = sns.barplot(x=predictions.index, y='NEPC', hue='NEPC', data=predictions, palette=pal, dodge=False)
g.legend_.remove()
sns.scatterplot(x=predictions.index, y='NEPC', hue='NEPC', data=predictions, palette=pal, s=600, legend=False)
def change_width(ax, new_value):
for patch in ax.patches:
current_width = patch.get_width()
diff = current_width - new_value
# we change the bar width
patch.set_width(new_value)
# we recenter the bar
patch.set_x(patch.get_x() + diff * .5)
change_width(g, .2)
for item in g.get_xticklabels():
item.set_rotation(45)
plt.axhline(y=0, color='b', linestyle='--', lw=2)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig(name + '/PredictionBarPlot.pdf', bbox_inches="tight")
plt.close()
plt.figure(figsize=(3, 1))
################# Bar Plot thing #################
# df = pd.DataFrame({'Sample': predictions.index,
# 'ARPC': [0.75, 0.85, 0],
# 'NEPC': [0.0, 0.0, 0.75],
# 'AMPC': [0.25, 0.15, 0.25]})
# df.set_index('Sample').plot(kind='bar', stacked=True, color=['#0077BB', '#CC3311', '#800080'])
# plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# plt.savefig(name + '/Types.pdf', bbox_inches="tight")
# plt.close()
if eval == 'AUCBar': # for benchmarking
df_bar = pd.DataFrame(columns=['TFX', 'Depth', 'AUC'])
bench_targets = [0.01, 0.05, 0.1, 0.2, 0.3]
for category in bench_targets:
for depth in ['0.2X', '1X', '25X']:
sub_df = predictions[(df['TFX'] == category) & (df['Depth'] == depth)]
y = pd.factorize(sub_df['Subtype'].values)[0]
fpr, tpr, _ = metrics.roc_curve(y, sub_df['NEPC'])
auc = metrics.auc(fpr, tpr)
df_bar.loc[len(df_bar.index) + 1] = [category, depth, auc]
plt.figure(figsize=(8, 8))
sns.barplot(x='TFX', y='AUC', hue='Depth', data=df_bar)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig(name + '/AUCBarPlot.pdf', bbox_inches="tight")
plt.close()
df_bar.to_csv(name + '/AUCList.tsv', sep="\t")
if eval == 'TripletBox': # Triplet Mixtures
plt.figure(figsize=(8, 8))
sns.boxplot(x='Subtype', y='NEPC', hue='TFX', data=predictions[predictions['TFX'] == 0.3])
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig(name + '/TripletBoxPlot.pdf', bbox_inches="tight")
plt.close()
if eval == 'ROC':
predictions = predictions[predictions['Subtype'].isin(['ARPC', 'NEPC'])]
thresholds = pd.DataFrame(0, index=['AllTFX', '0.00-0.10', '0.10-1.00'],
columns=['OptimumThreshold', 'Sensitivity', 'Specificity'])
# All TFXs
plt.figure(figsize=(8, 8))
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='black')
y = pd.factorize(predictions['Subtype'].values)[0]
fpr, tpr, threshold = metrics.roc_curve(y, predictions['NEPC'].values)
# fpr, tpr, threshold = nroc_curve(y, predictions['NEPC'].values)
pd.DataFrame([threshold, tpr, [1 - val for val in fpr]],
index=['Threshold', 'Sensitivity', 'Specificity'],
dtype=float).transpose().to_csv(name + '/' + name + '_AllThresholds.tsv', sep="\t")
roc_auc = metrics.auc(fpr, tpr)
optimum_thresh = Find_Optimal_Cutoff(y, predictions['NEPC'].values)[0]
specificity, sensitivity = specificity_sensitivity(y, predictions['NEPC'].values, optimum_thresh)
print(specificity_sensitivity(y, predictions['NEPC'].values, 0.3314))
thresholds.loc['AllTFX'] = [optimum_thresh, specificity, sensitivity]
plt.plot(fpr, tpr, label='AUC = % 0.2f' % roc_auc, lw=4)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.savefig(name + '/' + name + '_ROC.pdf', bbox_inches="tight")
plt.close()
# by TFX
plt.figure(figsize=(8, 8))
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='black')
# 0.00 - 0.10
sub_df = predictions.loc[predictions['TFX'] < 0.10]
y = pd.factorize(sub_df['Subtype'].values)[0]
fpr, tpr, threshold = metrics.roc_curve(y, sub_df['NEPC'].values, drop_intermediate=False)
# fpr, tpr, threshold = nroc_curve(y, sub_df['NEPC'].values)
pd.DataFrame([threshold, tpr, [1 - val for val in fpr]],
index=['Threshold', 'Sensitivity', 'Specificity'],
dtype=float).transpose().to_csv(name + '/' + name + '_0.00-0.10Thresholds.tsv', sep="\t")
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, label='TFX < 0.10: AUC = % 0.2f' % roc_auc, lw=4, color='#1c9964')
optimum_thresh = Find_Optimal_Cutoff(y, sub_df['NEPC'].values)[0]
specificity, sensitivity = specificity_sensitivity(y, sub_df['NEPC'].values, optimum_thresh)
thresholds.loc['0.00-0.10'] = [optimum_thresh, specificity, sensitivity]
# 0.25 - 1.00
sub_df = predictions.loc[predictions['TFX'] > 0.25]
y = pd.factorize(sub_df['Subtype'].values)[0]
fpr, tpr, threshold = metrics.roc_curve(y, sub_df['NEPC'].values)
# fpr, tpr, threshold = nroc_curve(y, sub_df['NEPC'].values)
pd.DataFrame([threshold, tpr, [1 - val for val in fpr]],
index=['Threshold', 'Sensitivity', 'Specificity'],
dtype=float).transpose().to_csv(name + '/' + name + '_0.1-1.00Thresholds.tsv', sep="\t")
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, label='TFX > 0.10: AUC = % 0.2f' % roc_auc, lw=4, color='#ff0000')
optimum_thresh = Find_Optimal_Cutoff(y, sub_df['NEPC'].values)[0]
specificity, sensitivity = specificity_sensitivity(y, sub_df['NEPC'].values, optimum_thresh)
thresholds.loc['0.10-1.00'] = [optimum_thresh, specificity, sensitivity]
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.savefig(name + '/' + name + '_TFX-ROC.pdf', bbox_inches="tight")
plt.close()
thresholds.to_csv(name + '/' + name + '_Thresholds.tsv', sep="\t")
def product_column(a, b):
ab = []
for item_a in a:
for item_b in b:
ab.append(item_a + '_' + item_b)
return ab
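# Quick illustration: product_column(['GC', 'TSS'], ['mean', 'std']) returns
# ['GC_mean', 'GC_std', 'TSS_mean', 'TSS_std'].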
def subset_data(df, sub_list):
regions = list(set([item.split('_')[0] for item in list(df.columns) if '_' in item]))
categories = list(set([item.split('_')[1] for item in list(df.columns) if '_' in item]))
features = list(set([item.split('_')[2] for item in list(df.columns) if '_' in item]))
sub_list += [region for region in regions if any(gene + '-' in region for gene in sub_list)]
sub_list = list(set(sub_list))
all_features = product_column(categories, features)
sub_features = product_column(sub_list, all_features)
sub_df = df[df.columns.intersection(sub_features)]
return pd.concat([df['Subtype'], sub_df], axis=1, join='inner')
def main():
test_data = 'emseq' # bench or patient_ULP/WGS or freed or triplet
# LuCaP dataframe - data is formatted in the "ExploreFM.py" pipeline
pickl = '/fh/fast/ha_g/user/rpatton/LuCaP_data/Exploration/LuCaP_FM.pkl'
print("Loading " + pickl)
df = pd.read_pickle(pickl)
df = df.drop('LB-Phenotype', axis=1)
df = df.rename(columns={'PC-Phenotype': 'Subtype'})
df = df[df['Subtype'] != 'AMPC']
df = df[df['Subtype'] != 'ARlow']
df = df[df.columns.drop(list(df.filter(regex='shannon-entropy')))]
df_lucap = df[df.columns.drop(list(df.filter(regex='mean-depth')))]
# Healthy dataframe - data is formatted in the "ExploreFM.py" pipeline
pickl = '/fh/fast/ha_g/user/rpatton/HD_data/Exploration/Healthy_FM.pkl'
print("Loading " + pickl)
df = pd.read_pickle(pickl)
df.insert(0, 'Subtype', 'Healthy')
df = df[df.columns.drop(list(df.filter(regex='shannon-entropy')))]
df_hd = df[df.columns.drop(list(df.filter(regex='mean-depth')))]
# Patient dataframe - data is formatted in the "ExploreFM.py" pipeline
if test_data == 'patient_WGS':
labels = pd.read_table('/fh/fast/ha_g/user/rpatton/patient-WGS_data/WGS_TF_hg19.txt',
sep='\t', index_col=0, names=['TFX'])
pickl = '/fh/fast/ha_g/user/rpatton/patient-WGS_data/Exploration/Patient_FM.pkl'
print("Loading " + pickl)
df = | pd.read_pickle(pickl) | pandas.read_pickle |
from pathlib import Path
from typing import Tuple
import pandas as pd
DATA_PATH: Path = Path("data")
FILE_PATHS: Tuple[Path, Path] = (DATA_PATH / "fifa21.json", DATA_PATH / "fifa20.json")
if __name__ == "__main__":
dfs = []
for file_path in FILE_PATHS:
df = pd.read_json(file_path)
print(f"{file_path.name}: {df.shape}")
dfs.append(df)
full_df = | pd.concat(dfs, ignore_index=True) | pandas.concat |
import numpy as np
import pandas as pd
from scipy import stats
import sys, os, time, json
from pathlib import Path
import pickle as pkl
sys.path.append('../PreProcessing/')
sys.path.append('../Lib/')
sys.path.append('../Analyses/')
import sklearn.linear_model as lm
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.metrics import balanced_accuracy_score as bac
from joblib import Parallel, delayed
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.text import Text
import seaborn as sns
import analyses_table as AT
import TreeMazeFunctions as TMF
sns.set(style="whitegrid",font_scale=1,rc={
'axes.spines.bottom': False,
'axes.spines.left': False,
'axes.spines.right': False,
'axes.spines.top': False,
'axes.edgecolor':'0.5'})
def main(sePaths, doPlots=False, overwrite = False):
try:
dat = AT.loadSessionData(sePaths)
nUnits = dat['fitTable2'].shape[0]
# univariate analyses.
fn = sePaths['CueDesc_SegUniRes']
if ( (not fn.exists()) or overwrite):
CueDescFR_Dat, all_dat_spl = CueDesc_SegUniAnalysis(dat)
CueDescFR_Dat.to_csv(sePaths['CueDesc_SegUniRes'])
if doPlots:
plotCueVDes(CueDescFR_Dat,sePaths)
plotUnitRvL(CueDescFR_Dat,all_dat_spl,sePaths)
else:
CueDescFR_Dat = pd.read_csv(fn)
# decoder analyses
fn = sePaths['CueDesc_SegDecRes']
if ((not fn.exists()) or overwrite):
singCellDec,singCellDecSummary, popDec = CueDesc_SegDecAnalysis(dat)
singCellDec['se'] = sePaths['session']
singCellDecSummary['se'] = sePaths['session']
popDec['se'] = sePaths['session']
singCellDec.to_csv(fn)
singCellDecSummary.to_csv(sePaths['CueDesc_SegDecSumRes'])
popDec.to_csv(sePaths['PopCueDesc_SegDecSumRes'])
if doPlots:
f,_ = plotMultipleDecoderResults(singCellDecSummary)
fn = sePaths['CueDescPlots'] / ('DecResByUnit.jpeg')
f.savefig(str(fn),dpi=150, bbox_inches='tight',pad_inches=0.2)
plt.close(f)
f,_ = plotMultipleDecoderResults(popDec)
fn = sePaths['CueDescPlots'] / ('PopDecRes.jpeg')
f.savefig(str(fn),dpi=150, bbox_inches='tight',pad_inches=0.2)
plt.close(f)
for unit in np.arange(nUnits):
f,_ = plotMultipleDecoderResults(singCellDec[(singCellDec['unit']==unit)])
fn = sePaths['CueDescPlots'] / ('DecRes_UnitID-{}.jpeg'.format(unit) )
f.savefig(str(fn),dpi=150, bbox_inches='tight',pad_inches=0.2)
plt.close(f)
else:
singCellDec = pd.read_csv(fn)
singCellDecSummary = pd.read_csv(sePaths['CueDesc_SegDecSumRes'])
popDec = | pd.read_csv(sePaths['PopCueDesc_SegDecSumRes']) | pandas.read_csv |
""" A suite of functions for evaluating ID-variant corpora along (mostly) linguistic criteria.
Results are logged in text form and visualized in a format best-suited to the nature of the collected data. """
import os
import codecs
import numpy as np
import spacy as sc
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from nltk import ngrams
from string import digits
from string import ascii_uppercase
from scipy import stats
from cognitive_language_model.src.codebase.preprocessing import Indexer, read_text_file
def read_file(corpus_path, corpus_name, target_dir, opt):
""" Generates a frequency-sorted vocabulary from a corpus file. """
print('Generating the vocab file for the {:s} corpus ...'.format(corpus_name))
corpus_sents, _ = read_text_file(corpus_path, None, lower=True)
# Create and populate an Indexer object holing the target vocabulary
corpus_vocab = Indexer(opt, corpus_name, zipf_sort=True)
for i in range(len(corpus_sents)):
corpus_vocab.add_sentence(corpus_sents[i])
corpus_vocab.map_ids()
vocab_log_path = os.path.join(target_dir, '{:s}_raw_vocab.txt'.format(corpus_name))
# Write so obtained vocabulary to file, including word rank, identity, and frequency
with codecs.open(vocab_log_path, 'w', encoding='utf8') as vocab_file:
rank = 1
for value in corpus_vocab.index_to_word.values():
try:
vocab_file.write('{:d}\t{:s}\t{:d}\n'.format(rank, value, corpus_vocab.word_to_freq[value]))
rank += 1
except KeyError:
continue
# Calculate corpus statistics
vocab_file.write('=' * 10 + '\n')
vocab_file.write('Word frequency mean: {:.4f}\n'.format(np.mean(list(corpus_vocab.word_to_freq.values()))))
vocab_file.write('Word frequency standard deviation: {:.4f}\n'
.format(np.std(list(corpus_vocab.word_to_freq.values()))))
print('Done.')
return corpus_sents, corpus_vocab
def get_length_stats(corpus_sents, corpus_name, target_dir):
""" Collects sentence length counts for the specified corpus. """
# Collect individual sentence lengths associated with sentences within the corpus
sent_lens = [len(sent.split()) for sent in corpus_sents]
unique_lens = set(sent_lens)
# Count length frequencies
len_counts = [(a_len, sent_lens.count(a_len)) for a_len in unique_lens]
len_counts_sorted = sorted(len_counts, reverse=True, key=lambda x: x[1])
lens_log_path = os.path.join(target_dir, '{:s}_sentence_lengths.txt'.format(corpus_name))
# Write length counts to file
with codecs.open(lens_log_path, 'w', encoding='utf8') as len_file:
for i in range(len(len_counts_sorted)):
len_file.write('{:d}\t{:d}\n'.format(len_counts_sorted[i][0], len_counts_sorted[i][1]))
# Calculate corpus statistics
len_file.write('=' * 10 + '\n')
len_file.write('Sentence length max: {:d}\n'.format(np.max(sent_lens)))
len_file.write('Sentence length min: {:d}\n'.format(np.min(sent_lens)))
len_file.write('Sentence length mean: {:.4f}\n'.format(np.mean(sent_lens)))
len_file.write('Sentence length standard deviation: {:.4f}\n'.format(np.std(sent_lens)))
print('Done.')
def get_ngrams(corpus_sents, gram, corpus_name, target_dir):
""" Generates a set of n-grams for the specified granularity, corpus-wise;
here: used for 2-grams and 3-grams. """
print('Generating the {:d}-gram file for the {:s} corpus ...'.format(gram, corpus_name))
# Collect n-grams present within the corpus
ngram_lists = [list(ngrams(sent.split(), gram)) for sent in corpus_sents]
flat_ngrams = [ngram for ngram_list in ngram_lists for ngram in ngram_list]
# Assemble n-gram frequency dictionary
ngram_dict = dict()
for ngram in flat_ngrams:
if ngram in ngram_dict.keys():
ngram_dict[ngram] += 1
else:
ngram_dict[ngram] = 1
# Count the occurrences of unique n-grams
ngram_counts = list(ngram_dict.items())
# Sort n-grams by frequency
ngram_counts_sorted = sorted(ngram_counts, reverse=True, key=lambda x: x[1])
# Write n-gram distribution to file
ngram_log_path = os.path.join(target_dir, '{:s}_{:d}-gram_counts.txt'.format(corpus_name, gram))
with codecs.open(ngram_log_path, 'w', encoding='utf8') as ngram_file:
for i in range(len(ngram_counts_sorted)):
ngram_file.write('{:d}\t{}\t{:d}\n'.format(i + 1, ngram_counts_sorted[i][0], ngram_counts_sorted[i][1]))
print('Done.')
def get_parses(corpus_sents, corpus_name, target_dir):
""" Annotates a set of sentences with POS tags and dependency parses,
tracking tag frequencies and dependency arc lengths. """
print('Generating the parse files for the {:s} corpus ...'.format(corpus_name))
# Define POS tag inventory separated along the open/ closed class axis;
# exclude tags not associated with either class (such as 'filler' words), due to their relatively low frequency
# and low relevance for the contrastive analysis of the two ID-variant corpora
open_class_tags = ['FW', 'GW', 'JJ', 'JJR', 'JJS', 'NN', 'NNP', 'NNPS', 'NNS', 'PDT', 'RB', 'RBR', 'RBS', 'UH',
'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ', 'WRB']
closed_class_tags = ['AFX', 'BES', 'CC', 'CD', 'DT', 'EX', 'HVS', 'IN', 'MD', 'POS', 'PRP', 'PRP$', 'RP', 'SYM',
'TO', 'WDT', 'WP', 'WP$']
all_tags = open_class_tags + closed_class_tags
# Parse corpus contents with SpaCy
model = sc.load('en')
parses = [model(sent) for sent in corpus_sents]
# Obtain tag counts for the specified tag inventory
flat_tags = [parse.tag_ for parsed_sent in parses for parse in parsed_sent]
unique_tags = set(flat_tags)
tag_counts = sorted([(tag, flat_tags.count(tag)) for tag in unique_tags if tag in all_tags],
reverse=True, key=lambda x: x[1])
# Calculate open class fraction (total and top 50%), to determine whether open classes are distributed differently
# in sentences of varying ID; intuitively one may expect high-ID sentences to contain a greater portion of open
# class words, as they exhibit greater variation and are therefore less predictable in sentential context
top_open = [tag_tpl[1] for tag_tpl in tag_counts[: len(tag_counts) // 2] if tag_tpl[0] in open_class_tags]
top_closed = [tag_tpl[1] for tag_tpl in tag_counts[: len(tag_counts) // 2] if tag_tpl[0] in closed_class_tags]
top_all = [tag_tpl[1] for tag_tpl in tag_counts[: len(tag_counts) // 2]]
top_open_fraction = sum(top_open) / sum(top_all)
top_closed_fraction = sum(top_closed) / sum(top_all)
full_open = [tag_tpl[1] for tag_tpl in tag_counts if tag_tpl[0] in open_class_tags]
full_closed = [tag_tpl[1] for tag_tpl in tag_counts if tag_tpl[0] in closed_class_tags]
full_all = [tag_tpl[1] for tag_tpl in tag_counts]
full_open_fraction = sum(full_open) / sum(full_all)
full_closed_fraction = sum(full_closed) / sum(full_all)
# Write tag counts to file
tag_log_path = os.path.join(target_dir, '{:s}_tag_counts.txt'.format(corpus_name))
with codecs.open(tag_log_path, 'w', encoding='utf8') as tag_file:
for i in range(len(tag_counts)):
tag_file.write('{:s}\t{:d}\n'.format(tag_counts[i][0], tag_counts[i][1]))
# Calculate corpus statistics
tag_file.write('=' * 10 + '\n')
tag_file.write('Open class fraction of most frequent 50% POS tags: {:.4f}\n'.format(top_open_fraction))
tag_file.write('Closed class fraction of most frequent 50% POS tags: {:.4f}\n'.format(top_closed_fraction))
tag_file.write('Open class fraction of all identified POS tags: {:.4f}\n'.format(full_open_fraction))
tag_file.write('Closed class fraction of all identified POS tags: {:.4f}'.format(full_closed_fraction))
print('Done with POS-tagging.')
# Perform dependency parsing related analysis
def _get_dlt(_parent, _children):
""" Computes the integration cost at the head of dependency relations identified within the input sentence,
according to the Dependency Locality Theory. """
dlt_cost = 0
for child in _children:
# Determine the span length between the child and parent node
left = min(_parent.i, child.i)
right = max(_parent.i, child.i)
for j in range(left + 1, right):
# Identify discourse referents present within the determined span
if 'NN' in parse[j].tag_ or 'VB' in parse[j].tag_:
dlt_cost += 1
# Check if the parent node is also occupied by a new discourse referent
if 'NN' in _parent.tag_ or 'VB' in _parent.tag_:
dlt_cost += 1
return dlt_cost
corpus_spans = list()
corpus_costs = list()
# Compute the mean dependency arc length and DLT integration cost for each sentence within the corpus
for parse in parses:
sent_spans = list()
sent_costs = list()
for parent in parse:
children = [w for w in parent.lefts] + [w for w in parent.rights]
if len(children) == 0:
continue
parent_spans = [abs(parent.i - child.i) for child in children]
sent_spans += parent_spans
sent_costs += [_get_dlt(parent, children)]
# Collect means
corpus_spans += [np.mean(sent_spans)]
corpus_costs += [np.mean(sent_costs)]
# Calculate SVO fraction (ultimately did not yield any interesting insights)
clause_triples = list()
svo_count = 0
other_count = 0
for parse in parses:
# Identify subjects, predicates, and objects
subjects = [[word.i, word.head.i] for word in parse if 'subj' in word.dep_ and word.head.pos_ == 'VERB']
objects = [[word.head.i, word.i] for word in parse if 'obj' in word.dep_]
for subj_list in subjects:
for obj_list in objects:
if subj_list[-1] == obj_list[0]:
clause_triple = subj_list + obj_list[-1:]
clause_triples.append(clause_triple)
# Check if isolated triples are in the SVO order, increment counter if so
if clause_triple[0] < clause_triple[1] < clause_triple[2]:
svo_count += 1
else:
other_count += 1
# Compute word order fractions
svo_fraction = svo_count / len(clause_triples)
other_fraction = other_count / len(clause_triples)
# Write mean sentence-wise dependency arc lengths and DLT integration costs to file
parse_log_path = os.path.join(target_dir, '{:s}_parse_stats.txt'.format(corpus_name))
with codecs.open(parse_log_path, 'w', encoding='utf8') as parse_file:
# Document mean sentence dependency arc length and mean sentence DLT integration cost
for i in range(len(corpus_spans)):
parse_file.write('{:.4f}\t{:.4f}\n'.format(corpus_spans[i], corpus_costs[i]))
# Calculate corpus statistics
parse_file.write('=' * 10 + '\n')
parse_file.write('Span length max: {:.4f}\n'.format(np.max(corpus_spans)))
parse_file.write('Span length min: {:.4f}\n'.format(np.min(corpus_spans)))
parse_file.write('Span length mean: {:.4f}\n'.format(np.mean(corpus_spans)))
parse_file.write('Span length standard deviation: {:.4f}\n'.format(np.std(corpus_spans)))
parse_file.write('=' * 10 + '\n')
parse_file.write('DLT cost max: {:.4f}\n'.format(np.max(corpus_costs)))
parse_file.write('DLT cost min: {:.4f}\n'.format(np.min(corpus_costs)))
parse_file.write('DLT cost mean: {:.4f}\n'.format(np.mean(corpus_costs)))
parse_file.write('DLT cost standard deviation: {:.4f}\n'.format(np.std(corpus_costs)))
# Document word order distribution
parse_file.write('=' * 10 + '\n')
parse_file.write('SVO clauses count: {:d}\n'.format(svo_count))
parse_file.write('SVO clauses fraction: {:.4f}\n'.format(svo_fraction))
parse_file.write('Other clauses count: {:d}\n'.format(other_count))
parse_file.write('Other clauses fraction: {:.4f}'.format(other_fraction))
print('Done with dependency parsing.')
def get_vocab_overlap(vocab_a, vocab_b, freq_bound, corpus_name_a, corpus_name_b, target_dir):
""" Calculates the overlap of the vocabularies provided, total and among words occurring with a frequency
greater than the specified threshold; no immediately interpretable results could be obtained. """
print('Comparing corpora-specific vocabularies ...')
# Keep a list of individual word types for each vocabulary
word_list_a = list(vocab_a.word_to_index.keys())
word_list_b = list(vocab_b.word_to_index.keys())
# Extract 'high-frequency' words (frequency bound is set arbitrarily)
bound_list_a = [word for word in word_list_a if word in vocab_a.word_to_freq.keys() and
vocab_a.word_to_freq[word] >= freq_bound]
bound_list_b = [word for word in word_list_b if word in vocab_b.word_to_freq.keys() and
vocab_b.word_to_freq[word] >= freq_bound]
# Calculate total word type overlap
total_shared_vocab = [word for word in word_list_a if word in word_list_b]
total_shared_words = len(total_shared_vocab)
vocab_a_total_unique_words = len(word_list_a) - total_shared_words
vocab_b_total_unique_words = len(word_list_b) - total_shared_words
# Calculate frequency-bounded word type overlap
bound_shared_vocab = [word for word in bound_list_a if word in bound_list_b]
bound_shared_words = len(bound_shared_vocab)
vocab_a_bound_unique_words = len(bound_list_a) - bound_shared_words
vocab_b_bound_unique_words = len(bound_list_b) - bound_shared_words
# Calculate overlap fractions
vocab_a_total_overlap_fraction = total_shared_words / len(word_list_a)
vocab_a_bound_overlap_fraction = bound_shared_words / len(bound_list_a)
vocab_b_total_overlap_fraction = total_shared_words / len(word_list_b)
vocab_b_bound_overlap_fraction = bound_shared_words / len(bound_list_b)
# Write collected information to file
overlap_log_path = os.path.join(target_dir, '{:s}_{:s}_vocab_overlap.txt'.format(corpus_name_a, corpus_name_b))
with codecs.open(overlap_log_path, 'w', encoding='utf8') as overlap_file:
overlap_file.write('Compared corpora: {:s} and {:s}\n'.format(corpus_name_a, corpus_name_b))
overlap_file.write('=' * 10 + '\n')
overlap_file.write('Total {:s} vocabulary size: {:d} words\n'.format(corpus_name_a, len(word_list_a)))
overlap_file.write('Total {:s} vocabulary size: {:d} words\n'.format(corpus_name_b, len(word_list_b)))
overlap_file.write('Bound {:s} vocabulary size for bound {:d}: {:d} words\n'
.format(corpus_name_a, freq_bound, len(bound_list_a)))
overlap_file.write('Bound fraction for {:s} corpus: {:.4f} words\n'
.format(corpus_name_a, len(bound_list_a) / len(word_list_a)))
overlap_file.write('Bound {:s} vocabulary for bound {:d} size: {:d} words\n'
.format(corpus_name_b, freq_bound, len(bound_list_b)))
overlap_file.write('Bound fraction for {:s} corpus: {:.4f} words\n'
.format(corpus_name_b, len(bound_list_b) / len(word_list_b)))
overlap_file.write('=' * 10 + '\n')
overlap_file.write('Total overlap size: {:d} words\n'.format(total_shared_words))
overlap_file.write('Total unique words for corpus {:s}: {:d}\n'
.format(corpus_name_a, vocab_a_total_unique_words))
overlap_file.write('Total unique words for corpus {:s}: {:d}\n'
.format(corpus_name_b, vocab_b_total_unique_words))
overlap_file.write('Frequency-bound overlap size for bound {:d}: {:d} words\n'.
format(freq_bound, bound_shared_words))
overlap_file.write('Frequency-bound unique words for corpus {:s}: {:d}\n'
.format(corpus_name_a, vocab_a_bound_unique_words))
overlap_file.write('Frequency-bound unique words for corpus {:s}: {:d}\n'
.format(corpus_name_b, vocab_b_bound_unique_words))
overlap_file.write('=' * 10 + '\n')
overlap_file.write('Total overlap fraction for corpus {:s}: {:.4f}\n'
.format(corpus_name_a, vocab_a_total_overlap_fraction))
overlap_file.write('Frequency-bound overlap fraction for corpus {:s} for bound {:d}: {:.4f}\n'
.format(corpus_name_a, freq_bound, vocab_a_bound_overlap_fraction))
overlap_file.write('=' * 10 + '\n')
overlap_file.write('Total overlap fraction for corpus {:s}: {:.4f}\n'
.format(corpus_name_b, vocab_b_total_overlap_fraction))
overlap_file.write('Frequency-bound overlap fraction for corpus {:s} for bound {:d}: {:.4f}\n'
.format(corpus_name_b, freq_bound, vocab_b_bound_overlap_fraction))
print('Done.')
def get_ngram_overlap(ngram_file_a, ngram_file_b, freq_bound, corpus_name_a, corpus_name_b, target_dir):
""" Calculates the overlap of the ngram-lists provided, total and among ngrams occurring with a frequency
greater than the specified threshold; no immediately interpretable results could be obtained."""
print('Comparing n-gram inventories ...')
# Read in corpus files
df_ngram_counts_a = pd.read_table(ngram_file_a, header=None, names=['Rank', 'Ngram', 'Counts'],
skip_blank_lines=True)
df_ngram_counts_b = pd.read_table(ngram_file_b, header=None, names=['Rank', 'Ngram', 'Counts'],
skip_blank_lines=True)
# Build n-gram inventories
ngram_list_a = [df_ngram_counts_a.iloc[row_id, 1] for row_id in range(len(df_ngram_counts_a))]
ngram_list_b = [df_ngram_counts_b.iloc[row_id, 1] for row_id in range(len(df_ngram_counts_b))]
bound_list_a = [df_ngram_counts_a.iloc[row_id, 1] for row_id in range(len(df_ngram_counts_a))
if int(df_ngram_counts_a.iloc[row_id, 2]) >= freq_bound]
bound_list_b = [df_ngram_counts_b.iloc[row_id, 1] for row_id in range(len(df_ngram_counts_b))
if int(df_ngram_counts_b.iloc[row_id, 2]) >= freq_bound]
# Calculate total unique n-gram overlap
total_shared_ngrams = [ngram for ngram in ngram_list_a if ngram in ngram_list_b]
total_shared_count = len(total_shared_ngrams)
total_unique_ngrams_a = len(ngram_list_a) - total_shared_count
total_unique_ngrams_b = len(ngram_list_b) - total_shared_count
# Calculate frequency-bounded unique ngram overlap
bound_shared_ngrams = [ngram for ngram in bound_list_a if ngram in bound_list_b]
bound_shared_count = len(bound_shared_ngrams)
bound_unique_ngrams_a = len(bound_list_a) - bound_shared_count
bound_unique_ngrams_b = len(bound_list_b) - bound_shared_count
# Calculate overlap fractions
total_overlap_fraction_a = total_shared_count / len(ngram_list_a)
bound_overlap_fraction_a = bound_shared_count / len(bound_list_a)
total_overlap_fraction_b = total_shared_count / len(ngram_list_b)
bound_overlap_fraction_b = bound_shared_count / len(bound_list_b)
# Write collected information to file
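    # Infer the n-gram order from the first shared n-gram (assumes the overlap is non-empty)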
gram_size = len(total_shared_ngrams[0].split())
overlap_log_path = os.path.join(target_dir, '{:s}_{:s}_{:d}-gram_overlap.txt'
.format(corpus_name_a, corpus_name_b, gram_size))
with codecs.open(overlap_log_path, 'w', encoding='utf8') as overlap_file:
overlap_file.write('Compared corpora: {:s} and {:s}\n'.format(corpus_name_a, corpus_name_b))
overlap_file.write('=' * 10 + '\n')
overlap_file.write('Total {:s} vocabulary size: {:d} entries\n'.format(corpus_name_a, len(ngram_list_a)))
overlap_file.write('Total {:s} vocabulary size: {:d} entries\n'.format(corpus_name_b, len(ngram_list_b)))
overlap_file.write('Bound {:s} vocabulary size for bound {:d}: {:d} entries\n'
.format(corpus_name_a, freq_bound, len(bound_list_a)))
        overlap_file.write('Bound fraction for {:s} corpus: {:.4f}\n'
                           .format(corpus_name_a, len(bound_list_a) / len(ngram_list_a)))
        overlap_file.write('Bound {:s} vocabulary size for bound {:d}: {:d} entries\n'
                           .format(corpus_name_b, freq_bound, len(bound_list_b)))
        overlap_file.write('Bound fraction for {:s} corpus: {:.4f}\n'
                           .format(corpus_name_b, len(bound_list_b) / len(ngram_list_b)))
overlap_file.write('=' * 10 + '\n')
overlap_file.write('Total overlap size: {:d} entries\n'.format(total_shared_count))
overlap_file.write('Total unique {:d}-grams for corpus {:s}: {:d}\n'
.format(gram_size, corpus_name_a, total_unique_ngrams_a))
overlap_file.write('Total unique {:d}-grams for corpus {:s}: {:d}\n'
.format(gram_size, corpus_name_b, total_unique_ngrams_b))
        overlap_file.write('Frequency-bound overlap size for bound {:d}: {:d} entries\n'
                           .format(freq_bound, bound_shared_count))
overlap_file.write('Frequency-bound unique {:d}-grams for corpus {:s}: {:d}\n'
.format(gram_size, corpus_name_a, bound_unique_ngrams_a))
overlap_file.write('Frequency-bound unique {:d}-grams for corpus {:s}: {:d}\n'
.format(gram_size, corpus_name_b, bound_unique_ngrams_b))
overlap_file.write('=' * 10 + '\n')
overlap_file.write('Total overlap fraction for corpus {:s}: {:.4f}\n'
.format(corpus_name_a, total_overlap_fraction_a))
overlap_file.write('Frequency-bound overlap fraction for corpus {:s} for bound {:d}: {:.4f}\n'
.format(corpus_name_a, freq_bound, bound_overlap_fraction_a))
overlap_file.write('=' * 10 + '\n')
overlap_file.write('Total overlap fraction for corpus {:s}: {:.4f}\n'
.format(corpus_name_b, total_overlap_fraction_b))
overlap_file.write('Frequency-bound overlap fraction for corpus {:s} for bound {:d}: {:.4f}\n'
.format(corpus_name_b, freq_bound, bound_overlap_fraction_b))
print('Done.')
def construct_annotated_corpora(extraction_path, id_variant_path, corpus_name, target_dir):
""" Compiles ID-variant corpora annotated with evaluation-relevant information, i.e. normalized surprisal,
normalized UID, and sentence length, by extracting low-ID and high-ID entries from the annotated 90k Europarl
corpus. """
# Read in main ID-annotated file
df_annotated = pd.read_table(extraction_path, header=None,
names=['Sentence', 'Total_surprisal', 'Per_word_surprisal', 'Normalized_surprisal',
'Total_UID_divergence', 'Per_word_UID_divergence', 'Normalized_UID_divergence'],
skip_blank_lines=True)
if id_variant_path is not None:
# Extract ID-specific sentences from the reference corpus
df_variant = pd.read_table(id_variant_path, header=None, names=['Sentence'], skip_blank_lines=True)
target_list = df_variant.iloc[:, 0].tolist()
target_list = [sent.strip() for sent in target_list]
else:
# No extraction, entire reference corpus is considered for further steps
target_list = df_annotated.iloc[:, 0].tolist()
target_list = [sent.strip() for sent in target_list]
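    # Note: the per-sentence membership checks below scan target_list linearly; a set would be faster for large corpora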
# Isolate evaluation-relevant features
df_features = df_annotated.loc[:, ['Sentence', 'Normalized_surprisal', 'Normalized_UID_divergence']]
surprisals = list()
uid_divs = list()
# Write the normalized surprisal and UID divergence distributions to file
features_log_path = os.path.join(target_dir, '{:s}_ID_features.txt'.format(corpus_name))
print('Writing to {:s} ...'.format(features_log_path))
with open(features_log_path, 'w') as id_file:
for line_id in range(len(df_features)):
sent = df_features.iloc[line_id][0]
sent_ns = df_features.iloc[line_id][1]
sent_nud = df_features.iloc[line_id][2]
if sent in target_list:
id_file.write('{:f}\t{:f}\n'.format(sent_ns, sent_nud))
surprisals += [float(sent_ns)]
uid_divs += [float(sent_nud)]
# Calculate corpus statistics
id_file.write('=' * 10 + '\n')
id_file.write('Surprisal max: {:.4f}\n'.format(np.max(surprisals)))
id_file.write('Surprisal min: {:.4f}\n'.format(np.min(surprisals)))
id_file.write('Surprisal mean: {:.4f}\n'.format(np.mean(surprisals)))
id_file.write('Surprisal standard deviation: {:.4f}\n'.format(np.std(surprisals)))
id_file.write('=' * 10 + '\n')
id_file.write('UID divergence max: {:.4f}\n'.format(np.max(uid_divs)))
id_file.write('UID divergence min: {:.4f}\n'.format(np.min(uid_divs)))
id_file.write('UID divergence mean: {:.4f}\n'.format(np.mean(uid_divs)))
id_file.write('UID divergence standard deviation: {:.4f}\n'.format(np.std(uid_divs)))
print('Done.')
def plot_dist(data_source, column_id, x_label, y_label, title=None, dtype=float):
""" Plots the histogram and the corresponding kernel density estimate for checking whether the data distribution is
approximately normal. """
def _filter_data(raw_data):
""" Filters plot-able data, by excluding lines containing corpus statistics and related information. """
legal_inventory = digits + '.'
filtered_data = list()
# Only retain numeric information
for data_point in raw_data:
skip = False
for symbol in list(str(data_point)):
if symbol not in legal_inventory:
skip = True
if not skip:
filtered_data.append(dtype(data_point))
return np.array(filtered_data)
# Set plot parameters
sns.set_style('whitegrid')
sns.set_context('paper')
# Read in source dataframe
df_features = pd.read_table(data_source, header=None, skip_blank_lines=True)
    # Designate data to be visualized within the source dataframe
if len(column_id) > 1:
# Transform data into a format better suited for a density plot
entries = _filter_data(df_features.iloc[:, column_id[0]].values)
counts = _filter_data(df_features.iloc[:, column_id[1]].values)
data_source = list()
        for i in range(entries.shape[0]):
            # Cast counts to int: _filter_data parses them with the (float) dtype argument
            data_source += [entries[i]] * int(counts[i])
data_source = np.array(data_source)
else:
data_source = _filter_data(df_features.iloc[:, column_id[0]].values)
assert (type(data_source) == np.ndarray and len(data_source.shape) == 1), \
'Expected a one-dimensional numpy array.'
# Make plot
fig, ax = plt.subplots()
fig.set_size_inches(8, 6)
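    # sns.distplot is the historical seaborn API; newer releases replace it with histplot/displot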
sns.distplot(data_source, kde=False, ax=ax, hist_kws=dict(edgecolor='w', linewidth=1))
# Adjust visuals
sns.despine()
plt.xlabel(x_label)
plt.ylabel(y_label)
if title is not None:
plt.title(title)
plt.show()
def plot_linear(data_source, column_ids, log_scale, x_label, y_label, title=None, dtype=int):
""" Visualizes a linear relationship between the provided data points. """
def _filter_data(raw_data):
""" Filters plot-able data. """
legal_inventory = digits + '.'
filtered_data = list()
# Only retain numeric information
for data_point in raw_data:
skip = False
for symbol in list(str(data_point)):
if symbol not in legal_inventory:
skip = True
if not skip:
filtered_data.append(dtype(data_point))
return np.array(filtered_data)
# Set plot parameters
sns.set_style('whitegrid')
sns.set_context('paper')
# Read in the source dataframe and isolate columns to be plotted
df_features = pd.read_table(data_source, header=None, skip_blank_lines=True)
x_data = _filter_data(df_features.iloc[:, column_ids[0]].values)
y_data = _filter_data(df_features.iloc[:, column_ids[1]].values)
assert (type(x_data) == np.ndarray and len(x_data.shape) == 1), \
'Expected a one-dimensional numpy array.'
assert (type(y_data) == np.ndarray and len(y_data.shape) == 1), \
'Expected a one-dimensional numpy array.'
    fig, ax = plt.subplots()
    # Optionally, use log axes (e.g. for plotting ranked word type frequencies)
    if log_scale:
        ax.set(xscale="log", yscale="log")
fig.set_size_inches(8, 6)
sns.regplot(x=x_data, y=y_data, ax=ax, fit_reg=False)
sns.despine()
plt.xlabel(x_label)
plt.ylabel(y_label)
if title is not None:
plt.title(title)
plt.show()
def plot_bar(source_files, column_ids, column_names, normalize, sort, plot_difference, freq_bound, title=None,
dtype=int):
""" Produces bar plots on the basis of the provided data, useful for comparing discrete quantities of distinct
entities. """
def _filter_data(raw_data, numerical):
""" Filters plot-able data. """
# Retain numeric information
legal_count_inventory = digits + '.'
# Retain POS tags, also
legal_entry_inventory = ascii_uppercase + '$'
filtered_data = list()
for data_point in raw_data:
skip = False
for symbol in list(str(data_point)):
if symbol not in legal_count_inventory and symbol not in legal_entry_inventory:
skip = True
if not skip:
if numerical:
filtered_data.append(dtype(data_point))
else:
filtered_data.append(data_point)
# Optionally normalize count values, resulting in a proportion plot
if numerical and normalize:
filtered_data = filtered_data / np.sum(filtered_data)
return np.array(filtered_data)
# Set plot parameters
sns.set_style('whitegrid')
sns.set_context('paper')
# Compile data to be plotted within a new dataframe
# Not necessary, but convenient when plotting with seaborn
source_dict = dict()
# Read in data and sort alphanumeric features (e.g. POS tags) alphabetically
df_features = pd.read_table(source_files[0], header=None, names=['Tag', 'Count'], skip_blank_lines=True)
df_features = df_features.sort_values('Tag', ascending=True)
df_reference = pd.read_table(source_files[1], header=None, names=['Tag', 'Count'], skip_blank_lines=True)
df_reference = df_reference.sort_values('Tag', ascending=True)
# Isolate columns to be plotted
entries = _filter_data(df_features.iloc[:, column_ids[0]].values, False)
counts = _filter_data(df_features.iloc[:, column_ids[1]].values, True) # e.g. counts from corpus A
reference_counts = _filter_data(df_reference.iloc[:, column_ids[1]].values, True) # e.g. counts from corpus B
# Construct dataframe to be visualized
source_dict[column_names[0]] = entries
source_dict['reference_counts'] = reference_counts
# Generate frequency mask to exclude low-frequency features from the plot
# Optional; results in a clearer, better readable visualization
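    # Note: with normalize=True the counts are proportions, so freq_bound must be chosen on that scale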
frequency_mask = np.array(
[int(counts[i] >= freq_bound or reference_counts[i] >= freq_bound) for i in range(counts.shape[0])])
source_dict['frequency_mask'] = frequency_mask
# Calculate per-feature count differences (i.e. target counts vs. reference counts), if specified
if plot_difference:
diffs = counts - reference_counts
source_dict[column_names[1]] = diffs
else:
source_dict[column_names[1]] = counts
features = pd.DataFrame.from_dict(source_dict)
# Sort by count value and apply frequency mask
if sort:
features = features.sort_values(column_names[0], ascending=True)
if freq_bound > 0:
features = features.drop(features[features.frequency_mask == 0].index)
# Make plot
fig, ax = plt.subplots()
fig.set_size_inches(8, 6)
if plot_difference:
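        # Color positive differences (target above reference) and negative differences separately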
colors = ['coral' if feature >= 0 else 'skyblue' for feature in features[column_names[1]]]
sns.barplot(x=column_names[1], y=column_names[0], data=features, ax=ax, palette=colors)
else:
sns.barplot(x=column_names[1], y=column_names[0], data=features, ax=ax, palette='Set2')
sns.despine()
if title is not None:
plt.title(title)
plt.show()
def plot_grid_dist(source_files, data_ids, col_ids, column_names, title=None, dtype=float, count_values=False):
""" Combines a set of histograms within one shared visualization. """
def _filter_data(raw_data, _dtype=dtype):
""" Filters plot-able data. """
# Same functionality as for non-facet plots
legal_inventory = digits + '.'
filtered_data = list()
for data_point in raw_data:
skip = False
for symbol in list(str(data_point)):
if symbol not in legal_inventory:
skip = True
if not skip:
filtered_data.append(_dtype(data_point))
return filtered_data
# Combine data from multiple sources
source_dict = {column_names[i]: list() for i in range(len(column_names))}
assert (len(source_files) == len(data_ids) == len(col_ids)), \
'Input lists should be of the same length.'
for i in range(len(source_files)):
df_features = pd.read_table(source_files[i], header=None, skip_blank_lines=True)
features = _filter_data(df_features.iloc[:, data_ids[i]].values)
# Change data format to be better suited for distribution plots
if count_values:
# Assumes feature frequencies are given in a column immediately to the right of the feature column
counts = _filter_data(df_features.iloc[:, data_ids[i] + 1].values, _dtype=int)
expanded_features = list()
for j in range(len(features)):
expanded_features += [features[j]] * counts[j]
features = expanded_features
source_dict[column_names[0]] += features
source_dict[column_names[1]] += [col_ids[i]] * len(features)
# Convert values into array-like form for dataframe creation
for key in source_dict.keys():
source_dict[key] = np.array(source_dict[key])
# Compile dataframe to be plotted
features = pd.DataFrame.from_dict(source_dict)
# Set plot parameters
sns.set_style('whitegrid')
sns.set_context('paper')
# Make facet-plot
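    # Note: 'size' is the FacetGrid argument in older seaborn versions; newer versions rename it to 'height'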
fgrid = sns.FacetGrid(features, col=column_names[1], size=4.5)
fgrid.map(sns.distplot, column_names[0], kde=True, bins=50, hist_kws=dict(edgecolor='w', linewidth=1))
plt.subplots_adjust(top=0.85)
if title is not None:
fgrid.fig.suptitle(title)
sns.despine()
plt.show()
def plot_grid_linear(source_files, x_data_ids, y_data_ids, col_ids, row_ids, column_names, title=None, dtype=int,
log_scale=True):
""" Combines a set of linear plots within one shared visualization. """
def _filter_data(raw_data):
""" Filters plotable data. """
# Same functionality as for non-facet plots
legal_inventory = digits + '.'
filtered_data = list()
for data_point in raw_data:
skip = False
for symbol in list(str(data_point)):
if symbol not in legal_inventory:
skip = True
if not skip:
filtered_data.append(dtype(data_point))
return filtered_data
# Combine data from multiple sources
source_dict = {column_names[i]: list() for i in range(len(column_names))}
assert (len(source_files) == len(x_data_ids) == len(y_data_ids) == len(col_ids) == len(row_ids)), \
'Input lists should be of the same length.'
for i in range(len(source_files)):
# Read in features to be visualized from each source file
df_features = pd.read_table(source_files[i], header=None, skip_blank_lines=True)
x_features = _filter_data(df_features.iloc[:, x_data_ids[i]].values)
y_features = _filter_data(df_features.iloc[:, y_data_ids[i]].values)
# Add features to the joint dictionary and denote their source
source_dict[column_names[0]] += x_features
source_dict[column_names[1]] += y_features
source_dict[column_names[2]] += [col_ids[i]] * len(x_features)
source_dict[column_names[3]] += [row_ids[i]] * len(x_features)
# Convert values into array-like form for dataframe creation
for key in source_dict.keys():
source_dict[key] = np.array(source_dict[key])
# Compile dataframe
features = pd.DataFrame.from_dict(source_dict)
# Set plot parameters
sns.set_style('whitegrid')
sns.set_context('paper')
# Make facet plot
fgrid = sns.FacetGrid(features, col=column_names[2], row=column_names[3], size=3.5)
# Optionally use log axes
if log_scale:
ax = fgrid.axes[0][0]
ax.set_xscale('log')
ax.set_yscale('log')
fgrid.map(sns.regplot, column_names[0], column_names[1], fit_reg=False)
plt.subplots_adjust(top=0.9)
if title is not None:
fgrid.fig.suptitle(title)
sns.despine()
plt.show()
def plot_grid_bar(source_files, col_ids, column_ids, column_names, normalize, freq_bound, grouped, title=None,
dtype=int):
""" Combines a set of bar plots within one shared visualization. """
def _filter_data(raw_data, numerical):
""" Filters plotable data. """
# Same functionality as for non-facet plots
legal_count_inventory = digits + '.'
legal_entry_inventory = ascii_uppercase + '$'
filtered_data = list()
for data_point in raw_data:
skip = False
for symbol in list(str(data_point)):
if symbol not in legal_count_inventory and symbol not in legal_entry_inventory:
skip = True
if not skip:
if numerical:
filtered_data.append(dtype(data_point))
else:
filtered_data.append(data_point)
if numerical and normalize:
filtered_data = filtered_data / np.sum(filtered_data)
return np.array(filtered_data)
# Combine data from multiple sources
source_dict = dict()
# Read in data and sort alphanumeric features (e.g. POS tags) alphabetically
df_features_a = pd.read_table(source_files[0], header=None, names=['Tag', 'Count'], skip_blank_lines=True)
df_features_a = df_features_a.sort_values('Tag', ascending=True)
df_features_b = pd.read_table(source_files[1], header=None, names=['Tag', 'Count'], skip_blank_lines=True)
df_features_b = df_features_b.sort_values('Tag', ascending=True)
# Isolate columns to be plotted
entries = _filter_data(df_features_a.iloc[:, column_ids[0]].values, False)
counts_a = _filter_data(df_features_a.iloc[:, column_ids[1]].values, True)
counts_b = _filter_data(df_features_b.iloc[:, column_ids[1]].values, True)
# Construct dataframe to be visualized
source_dict[column_names[0]] = entries
source_dict[column_names[1]] = counts_a
source_dict['temp'] = counts_b
# Generate frequency mask to exclude low-frequency features from the plot
frequency_mask = np.array(
[int(counts_a[i] >= freq_bound or counts_b[i] >= freq_bound) for i in range(counts_a.shape[0])])
source_dict['frequency_mask'] = frequency_mask
features = | pd.DataFrame.from_dict(source_dict) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
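        # Split the file into (start_row, nrows) chunks, one per worker thread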
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
            # we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
            # check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[0, 1])
tm.assert_frame_equal(df, expected)
from psaw import PushshiftAPI
import pandas as pd
import datetime as dt
import time
import os
api = PushshiftAPI()
epoch = dt.datetime.utcfromtimestamp(0)
def epoch_time(dtdt):
return (dtdt - epoch).total_seconds()
def get_date(created):
return dt.datetime.fromtimestamp(created)
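# Example round trip for the two helpers above (illustrative values):
# epoch_time(dt.datetime(2020, 1, 1)) -> 1577836800.0 seconds since the Unix epoch
# get_date(1577836800.0)              -> local datetime corresponding to 2020-01-01 UTC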
# See pushshift API for descriptions of arguments https://pushshift.io/api-parameters/
def get_reddit_comments(search_terms, subreddits, begin_time, end_time, out_dir="../data/reddit/comments"):
abs_out = os.path.abspath(out_dir)
if (not os.path.isdir(abs_out)):
os.makedirs(abs_out)
for term in search_terms:
data = api.search_comments(q=term, subreddit=subreddits, after=int(begin_time.timestamp()),
before=int(end_time.timestamp()), sort='asc', \
filter=['subreddit','id', 'score', 'body', 'created_utc'])
topics_dict = {
"subreddit" : [],
"id" : [],
"score" : [],
"body" : [],
"created_utc" : [],
"created" : []
}
tstamps = []
for comment in data:
topics_dict["subreddit"].append(comment.subreddit)
topics_dict["id"].append(comment.id)
topics_dict["body"].append(comment.body.replace('\r', ' ').replace('\n', ' '))
topics_dict["created_utc"].append(comment.created_utc)
topics_dict["score"].append(comment.score)
topics_dict["created"].append(get_date(comment.created_utc))
# tstamps = [get_date(i) for i in tstamps]
data = pd.DataFrame(topics_dict)
from thermostat.stats import combine_output_dataframes
from thermostat.stats import compute_summary_statistics
from thermostat.stats import summary_statistics_to_csv
from .fixtures.thermostats import thermostat_emg_aux_constant_on_outlier
from thermostat.multiple import multiple_thermostat_calculate_epa_field_savings_metrics
from thermostat.exporters import COLUMNS
from scipy.stats import norm, randint
import pandas as pd
import numpy as np
import json
from datetime import datetime
import tempfile
from itertools import islice, cycle
import pytest
def get_fake_output_df(n_columns):
columns = [
'sw_version',
'ct_identifier',
'equipment_type',
'heating_or_cooling',
'station',
'zipcode',
'climate_zone',
'start_date',
'end_date',
'n_days_in_inputfile_date_range',
'n_days_both_heating_and_cooling',
'n_days_insufficient_data',
'n_core_cooling_days',
'n_core_heating_days',
'baseline_percentile_core_cooling_comfort_temperature',
'baseline_percentile_core_heating_comfort_temperature',
'regional_average_baseline_cooling_comfort_temperature',
'regional_average_baseline_heating_comfort_temperature',
'percent_savings_baseline_percentile',
'avoided_daily_mean_core_day_runtime_baseline_percentile',
'avoided_total_core_day_runtime_baseline_percentile',
'baseline_daily_mean_core_day_runtime_baseline_percentile',
'baseline_total_core_day_runtime_baseline_percentile',
'_daily_mean_core_day_demand_baseline_baseline_percentile',
'percent_savings_baseline_regional',
'avoided_daily_mean_core_day_runtime_baseline_regional',
'avoided_total_core_day_runtime_baseline_regional',
'baseline_daily_mean_core_day_runtime_baseline_regional',
'baseline_total_core_day_runtime_baseline_regional',
'_daily_mean_core_day_demand_baseline_baseline_regional',
'mean_demand',
'alpha',
'tau',
'mean_sq_err',
'root_mean_sq_err',
'cv_root_mean_sq_err',
'mean_abs_err',
'mean_abs_pct_err',
'total_core_cooling_runtime',
'total_core_heating_runtime',
'total_auxiliary_heating_core_day_runtime',
'total_emergency_heating_core_day_runtime',
'daily_mean_core_cooling_runtime',
'daily_mean_core_heating_runtime',
'core_cooling_days_mean_indoor_temperature',
'core_cooling_days_mean_outdoor_temperature',
'core_heating_days_mean_indoor_temperature',
'core_heating_days_mean_outdoor_temperature',
'core_mean_indoor_temperature',
'core_mean_outdoor_temperature',
'rhu1_aux_duty_cycle',
'rhu1_emg_duty_cycle',
'rhu1_compressor_duty_cycle',
'rhu1_00F_to_05F',
'rhu1_05F_to_10F',
'rhu1_10F_to_15F',
'rhu1_15F_to_20F',
'rhu1_20F_to_25F',
'rhu1_25F_to_30F',
'rhu1_30F_to_35F',
'rhu1_35F_to_40F',
'rhu1_40F_to_45F',
'rhu1_45F_to_50F',
'rhu1_50F_to_55F',
'rhu1_55F_to_60F',
'rhu1_less10F',
'rhu1_10F_to_20F',
'rhu1_20F_to_30F',
'rhu1_30F_to_40F',
'rhu1_40F_to_50F',
'rhu1_50F_to_60F',
'rhu1_00F_to_05F_aux_duty_cycle',
'rhu1_05F_to_10F_aux_duty_cycle',
'rhu1_10F_to_15F_aux_duty_cycle',
'rhu1_15F_to_20F_aux_duty_cycle',
'rhu1_20F_to_25F_aux_duty_cycle',
'rhu1_25F_to_30F_aux_duty_cycle',
'rhu1_30F_to_35F_aux_duty_cycle',
'rhu1_35F_to_40F_aux_duty_cycle',
'rhu1_40F_to_45F_aux_duty_cycle',
'rhu1_45F_to_50F_aux_duty_cycle',
'rhu1_50F_to_55F_aux_duty_cycle',
'rhu1_55F_to_60F_aux_duty_cycle',
'rhu1_less10F_aux_duty_cycle',
'rhu1_10F_to_20F_aux_duty_cycle',
'rhu1_20F_to_30F_aux_duty_cycle',
'rhu1_30F_to_40F_aux_duty_cycle',
'rhu1_40F_to_50F_aux_duty_cycle',
'rhu1_50F_to_60F_aux_duty_cycle',
'rhu1_00F_to_05F_emg_duty_cycle',
'rhu1_05F_to_10F_emg_duty_cycle',
'rhu1_10F_to_15F_emg_duty_cycle',
'rhu1_15F_to_20F_emg_duty_cycle',
'rhu1_20F_to_25F_emg_duty_cycle',
'rhu1_25F_to_30F_emg_duty_cycle',
'rhu1_30F_to_35F_emg_duty_cycle',
'rhu1_35F_to_40F_emg_duty_cycle',
'rhu1_40F_to_45F_emg_duty_cycle',
'rhu1_45F_to_50F_emg_duty_cycle',
'rhu1_50F_to_55F_emg_duty_cycle',
'rhu1_55F_to_60F_emg_duty_cycle',
'rhu1_less10F_emg_duty_cycle',
'rhu1_10F_to_20F_emg_duty_cycle',
'rhu1_20F_to_30F_emg_duty_cycle',
'rhu1_30F_to_40F_emg_duty_cycle',
'rhu1_40F_to_50F_emg_duty_cycle',
'rhu1_50F_to_60F_emg_duty_cycle',
'rhu1_00F_to_05F_compressor_duty_cycle',
'rhu1_05F_to_10F_compressor_duty_cycle',
'rhu1_10F_to_15F_compressor_duty_cycle',
'rhu1_15F_to_20F_compressor_duty_cycle',
'rhu1_20F_to_25F_compressor_duty_cycle',
'rhu1_25F_to_30F_compressor_duty_cycle',
'rhu1_30F_to_35F_compressor_duty_cycle',
'rhu1_35F_to_40F_compressor_duty_cycle',
'rhu1_40F_to_45F_compressor_duty_cycle',
'rhu1_45F_to_50F_compressor_duty_cycle',
'rhu1_50F_to_55F_compressor_duty_cycle',
'rhu1_55F_to_60F_compressor_duty_cycle',
'rhu1_less10F_compressor_duty_cycle',
'rhu1_10F_to_20F_compressor_duty_cycle',
'rhu1_20F_to_30F_compressor_duty_cycle',
'rhu1_30F_to_40F_compressor_duty_cycle',
'rhu1_40F_to_50F_compressor_duty_cycle',
'rhu1_50F_to_60F_compressor_duty_cycle',
'rhu2_aux_duty_cycle',
'rhu2_emg_duty_cycle',
'rhu2_compressor_duty_cycle',
'rhu2_00F_to_05F',
'rhu2_05F_to_10F',
'rhu2_10F_to_15F',
'rhu2_15F_to_20F',
'rhu2_20F_to_25F',
'rhu2_25F_to_30F',
'rhu2_30F_to_35F',
'rhu2_35F_to_40F',
'rhu2_40F_to_45F',
'rhu2_45F_to_50F',
'rhu2_50F_to_55F',
'rhu2_55F_to_60F',
'rhu2_less10F',
'rhu2_10F_to_20F',
'rhu2_20F_to_30F',
'rhu2_30F_to_40F',
'rhu2_40F_to_50F',
'rhu2_50F_to_60F',
'rhu2_00F_to_05F_aux_duty_cycle',
'rhu2_05F_to_10F_aux_duty_cycle',
'rhu2_10F_to_15F_aux_duty_cycle',
'rhu2_15F_to_20F_aux_duty_cycle',
'rhu2_20F_to_25F_aux_duty_cycle',
'rhu2_25F_to_30F_aux_duty_cycle',
'rhu2_30F_to_35F_aux_duty_cycle',
'rhu2_35F_to_40F_aux_duty_cycle',
'rhu2_40F_to_45F_aux_duty_cycle',
'rhu2_45F_to_50F_aux_duty_cycle',
'rhu2_50F_to_55F_aux_duty_cycle',
'rhu2_55F_to_60F_aux_duty_cycle',
'rhu2_less10F_aux_duty_cycle',
'rhu2_10F_to_20F_aux_duty_cycle',
'rhu2_20F_to_30F_aux_duty_cycle',
'rhu2_30F_to_40F_aux_duty_cycle',
'rhu2_40F_to_50F_aux_duty_cycle',
'rhu2_50F_to_60F_aux_duty_cycle',
'rhu2_00F_to_05F_emg_duty_cycle',
'rhu2_05F_to_10F_emg_duty_cycle',
'rhu2_10F_to_15F_emg_duty_cycle',
'rhu2_15F_to_20F_emg_duty_cycle',
'rhu2_20F_to_25F_emg_duty_cycle',
'rhu2_25F_to_30F_emg_duty_cycle',
'rhu2_30F_to_35F_emg_duty_cycle',
'rhu2_35F_to_40F_emg_duty_cycle',
'rhu2_40F_to_45F_emg_duty_cycle',
'rhu2_45F_to_50F_emg_duty_cycle',
'rhu2_50F_to_55F_emg_duty_cycle',
'rhu2_55F_to_60F_emg_duty_cycle',
'rhu2_less10F_emg_duty_cycle',
'rhu2_10F_to_20F_emg_duty_cycle',
'rhu2_20F_to_30F_emg_duty_cycle',
'rhu2_30F_to_40F_emg_duty_cycle',
'rhu2_40F_to_50F_emg_duty_cycle',
'rhu2_50F_to_60F_emg_duty_cycle',
'rhu2_00F_to_05F_compressor_duty_cycle',
'rhu2_05F_to_10F_compressor_duty_cycle',
'rhu2_10F_to_15F_compressor_duty_cycle',
'rhu2_15F_to_20F_compressor_duty_cycle',
'rhu2_20F_to_25F_compressor_duty_cycle',
'rhu2_25F_to_30F_compressor_duty_cycle',
'rhu2_30F_to_35F_compressor_duty_cycle',
'rhu2_35F_to_40F_compressor_duty_cycle',
'rhu2_40F_to_45F_compressor_duty_cycle',
'rhu2_45F_to_50F_compressor_duty_cycle',
'rhu2_50F_to_55F_compressor_duty_cycle',
'rhu2_55F_to_60F_compressor_duty_cycle',
'rhu2_less10F_compressor_duty_cycle',
'rhu2_10F_to_20F_compressor_duty_cycle',
'rhu2_20F_to_30F_compressor_duty_cycle',
'rhu2_30F_to_40F_compressor_duty_cycle',
'rhu2_40F_to_50F_compressor_duty_cycle',
'rhu2_50F_to_60F_compressor_duty_cycle',
]
string_placeholder = ["PLACEHOLDER"] * n_columns
zero_column = [0 if randint.rvs(0, 30) > 0 else (None if randint.rvs(0, 2) > 0 else np.inf)
for i in randint.rvs(0, 1, size=n_columns)]
one_column = [1 if randint.rvs(0, 30) > 0 else (None if randint.rvs(0, 2) > 0 else np.inf)
for i in randint.rvs(0, 1, size=n_columns)]
float_column = [i if randint.rvs(0, 30) > 0 else (None if randint.rvs(0, 2) > 0 else np.inf)
for i in norm.rvs(size=n_columns)]
zipcodes = ["01234", "12345", "23456", "34567", "43210", "54321", "65432", "76543"]
zipcode_column = [i for i in islice(cycle(zipcodes), None, n_columns)]
core_day_set_names = ["cooling_2012", "heating_2012-2013", "cooling_2013"]
core_day_set_name_column = [i for i in islice(cycle(core_day_set_names), None, n_columns)]
data = {
'sw_version': string_placeholder,
'ct_identifier': string_placeholder,
'equipment_type': string_placeholder,
'heating_or_cooling': core_day_set_name_column,
'station': string_placeholder,
'zipcode': zipcode_column,
'climate_zone': string_placeholder,
'start_date': datetime(2011, 1, 1),
'end_date': datetime(2012, 1, 1),
'n_days_both_heating_and_cooling': one_column,
'n_days_in_inputfile_date_range': one_column,
'n_days_insufficient_data': zero_column,
'n_core_heating_days': one_column,
'baseline_percentile_core_cooling_comfort_temperature': float_column,
'baseline_percentile_core_heating_comfort_temperature': float_column,
'regional_average_baseline_cooling_comfort_temperature': float_column,
'regional_average_baseline_heating_comfort_temperature': float_column,
'percent_savings_baseline_percentile': float_column,
'avoided_daily_mean_core_day_runtime_baseline_percentile': float_column,
'avoided_total_core_day_runtime_baseline_percentile': float_column,
'baseline_daily_mean_core_day_runtime_baseline_percentile': float_column,
'baseline_total_core_day_runtime_baseline_percentile': float_column,
'_daily_mean_core_day_demand_baseline_baseline_percentile': float_column,
'percent_savings_baseline_regional': float_column,
'avoided_daily_mean_core_day_runtime_baseline_regional': float_column,
'avoided_total_core_day_runtime_baseline_regional': float_column,
'baseline_daily_mean_core_day_runtime_baseline_regional': float_column,
'baseline_total_core_day_runtime_baseline_regional': float_column,
'_daily_mean_core_day_demand_baseline_baseline_regional': float_column,
'mean_demand': float_column,
'alpha': float_column,
'tau': float_column,
'mean_sq_err': float_column,
'root_mean_sq_err': float_column,
'cv_root_mean_sq_err': float_column,
'mean_abs_err': float_column,
'mean_abs_pct_err': float_column,
'total_core_cooling_runtime': float_column,
'total_core_heating_runtime': float_column,
'total_auxiliary_heating_core_day_runtime': float_column,
'total_emergency_heating_core_day_runtime': float_column,
'daily_mean_core_cooling_runtime': float_column,
'daily_mean_core_heating_runtime': float_column,
'core_cooling_days_mean_indoor_temperature': float_column,
'core_cooling_days_mean_outdoor_temperature': float_column,
'core_heating_days_mean_indoor_temperature': float_column,
'core_heating_days_mean_outdoor_temperature': float_column,
'core_mean_indoor_temperature': float_column,
'core_mean_outdoor_temperature': float_column,
'rhu1_aux_duty_cycle': float_column,
'rhu1_emg_duty_cycle': float_column,
'rhu1_compressor_duty_cycle': float_column,
'rhu1_00F_to_05F': float_column,
'rhu1_05F_to_10F': float_column,
'rhu1_10F_to_15F': float_column,
'rhu1_15F_to_20F': float_column,
'rhu1_20F_to_25F': float_column,
'rhu1_25F_to_30F': float_column,
'rhu1_30F_to_35F': float_column,
'rhu1_35F_to_40F': float_column,
'rhu1_40F_to_45F': float_column,
'rhu1_45F_to_50F': float_column,
'rhu1_50F_to_55F': float_column,
'rhu1_55F_to_60F': float_column,
'rhu1_less10F': float_column,
'rhu1_10F_to_20F': float_column,
'rhu1_20F_to_30F': float_column,
'rhu1_30F_to_40F': float_column,
'rhu1_40F_to_50F': float_column,
'rhu1_50F_to_60F': float_column,
'rhu1_00F_to_05F_aux_duty_cycle': float_column,
'rhu1_05F_to_10F_aux_duty_cycle': float_column,
'rhu1_10F_to_15F_aux_duty_cycle': float_column,
'rhu1_15F_to_20F_aux_duty_cycle': float_column,
'rhu1_20F_to_25F_aux_duty_cycle': float_column,
'rhu1_25F_to_30F_aux_duty_cycle': float_column,
'rhu1_30F_to_35F_aux_duty_cycle': float_column,
'rhu1_35F_to_40F_aux_duty_cycle': float_column,
'rhu1_40F_to_45F_aux_duty_cycle': float_column,
'rhu1_45F_to_50F_aux_duty_cycle': float_column,
'rhu1_50F_to_55F_aux_duty_cycle': float_column,
'rhu1_55F_to_60F_aux_duty_cycle': float_column,
'rhu1_less10F_aux_duty_cycle': float_column,
'rhu1_10F_to_20F_aux_duty_cycle': float_column,
'rhu1_20F_to_30F_aux_duty_cycle': float_column,
'rhu1_30F_to_40F_aux_duty_cycle': float_column,
'rhu1_40F_to_50F_aux_duty_cycle': float_column,
'rhu1_50F_to_60F_aux_duty_cycle': float_column,
'rhu1_00F_to_05F_emg_duty_cycle': float_column,
'rhu1_05F_to_10F_emg_duty_cycle': float_column,
'rhu1_10F_to_15F_emg_duty_cycle': float_column,
'rhu1_15F_to_20F_emg_duty_cycle': float_column,
'rhu1_20F_to_25F_emg_duty_cycle': float_column,
'rhu1_25F_to_30F_emg_duty_cycle': float_column,
'rhu1_30F_to_35F_emg_duty_cycle': float_column,
'rhu1_35F_to_40F_emg_duty_cycle': float_column,
'rhu1_40F_to_45F_emg_duty_cycle': float_column,
'rhu1_45F_to_50F_emg_duty_cycle': float_column,
'rhu1_50F_to_55F_emg_duty_cycle': float_column,
'rhu1_55F_to_60F_emg_duty_cycle': float_column,
'rhu1_less10F_emg_duty_cycle': float_column,
'rhu1_10F_to_20F_emg_duty_cycle': float_column,
'rhu1_20F_to_30F_emg_duty_cycle': float_column,
'rhu1_30F_to_40F_emg_duty_cycle': float_column,
'rhu1_40F_to_50F_emg_duty_cycle': float_column,
'rhu1_50F_to_60F_emg_duty_cycle': float_column,
'rhu1_00F_to_05F_compressor_duty_cycle': float_column,
'rhu1_05F_to_10F_compressor_duty_cycle': float_column,
'rhu1_10F_to_15F_compressor_duty_cycle': float_column,
'rhu1_15F_to_20F_compressor_duty_cycle': float_column,
'rhu1_20F_to_25F_compressor_duty_cycle': float_column,
'rhu1_25F_to_30F_compressor_duty_cycle': float_column,
'rhu1_30F_to_35F_compressor_duty_cycle': float_column,
'rhu1_35F_to_40F_compressor_duty_cycle': float_column,
'rhu1_40F_to_45F_compressor_duty_cycle': float_column,
'rhu1_45F_to_50F_compressor_duty_cycle': float_column,
'rhu1_50F_to_55F_compressor_duty_cycle': float_column,
'rhu1_55F_to_60F_compressor_duty_cycle': float_column,
'rhu1_less10F_compressor_duty_cycle': float_column,
'rhu1_10F_to_20F_compressor_duty_cycle': float_column,
'rhu1_20F_to_30F_compressor_duty_cycle': float_column,
'rhu1_30F_to_40F_compressor_duty_cycle': float_column,
'rhu1_40F_to_50F_compressor_duty_cycle': float_column,
'rhu1_50F_to_60F_compressor_duty_cycle': float_column,
'rhu2_aux_duty_cycle': float_column,
'rhu2_emg_duty_cycle': float_column,
'rhu2_compressor_duty_cycle': float_column,
'rhu2_00F_to_05F': float_column,
'rhu2_05F_to_10F': float_column,
'rhu2_10F_to_15F': float_column,
'rhu2_15F_to_20F': float_column,
'rhu2_20F_to_25F': float_column,
'rhu2_25F_to_30F': float_column,
'rhu2_30F_to_35F': float_column,
'rhu2_35F_to_40F': float_column,
'rhu2_40F_to_45F': float_column,
'rhu2_45F_to_50F': float_column,
'rhu2_50F_to_55F': float_column,
'rhu2_55F_to_60F': float_column,
'rhu2_less10F': float_column,
'rhu2_10F_to_20F': float_column,
'rhu2_20F_to_30F': float_column,
'rhu2_30F_to_40F': float_column,
'rhu2_40F_to_50F': float_column,
'rhu2_50F_to_60F': float_column,
'rhu2_00F_to_05F_aux_duty_cycle': float_column,
'rhu2_05F_to_10F_aux_duty_cycle': float_column,
'rhu2_10F_to_15F_aux_duty_cycle': float_column,
'rhu2_15F_to_20F_aux_duty_cycle': float_column,
'rhu2_20F_to_25F_aux_duty_cycle': float_column,
'rhu2_25F_to_30F_aux_duty_cycle': float_column,
'rhu2_30F_to_35F_aux_duty_cycle': float_column,
'rhu2_35F_to_40F_aux_duty_cycle': float_column,
'rhu2_40F_to_45F_aux_duty_cycle': float_column,
'rhu2_45F_to_50F_aux_duty_cycle': float_column,
'rhu2_50F_to_55F_aux_duty_cycle': float_column,
'rhu2_55F_to_60F_aux_duty_cycle': float_column,
'rhu2_less10F_aux_duty_cycle': float_column,
'rhu2_10F_to_20F_aux_duty_cycle': float_column,
'rhu2_20F_to_30F_aux_duty_cycle': float_column,
'rhu2_30F_to_40F_aux_duty_cycle': float_column,
'rhu2_40F_to_50F_aux_duty_cycle': float_column,
'rhu2_50F_to_60F_aux_duty_cycle': float_column,
'rhu2_00F_to_05F_emg_duty_cycle': float_column,
'rhu2_05F_to_10F_emg_duty_cycle': float_column,
'rhu2_10F_to_15F_emg_duty_cycle': float_column,
'rhu2_15F_to_20F_emg_duty_cycle': float_column,
'rhu2_20F_to_25F_emg_duty_cycle': float_column,
'rhu2_25F_to_30F_emg_duty_cycle': float_column,
'rhu2_30F_to_35F_emg_duty_cycle': float_column,
'rhu2_35F_to_40F_emg_duty_cycle': float_column,
'rhu2_40F_to_45F_emg_duty_cycle': float_column,
'rhu2_45F_to_50F_emg_duty_cycle': float_column,
'rhu2_50F_to_55F_emg_duty_cycle': float_column,
'rhu2_55F_to_60F_emg_duty_cycle': float_column,
'rhu2_less10F_emg_duty_cycle': float_column,
'rhu2_10F_to_20F_emg_duty_cycle': float_column,
'rhu2_20F_to_30F_emg_duty_cycle': float_column,
'rhu2_30F_to_40F_emg_duty_cycle': float_column,
'rhu2_40F_to_50F_emg_duty_cycle': float_column,
'rhu2_50F_to_60F_emg_duty_cycle': float_column,
'rhu2_00F_to_05F_compressor_duty_cycle': float_column,
'rhu2_05F_to_10F_compressor_duty_cycle': float_column,
'rhu2_10F_to_15F_compressor_duty_cycle': float_column,
'rhu2_15F_to_20F_compressor_duty_cycle': float_column,
'rhu2_20F_to_25F_compressor_duty_cycle': float_column,
'rhu2_25F_to_30F_compressor_duty_cycle': float_column,
'rhu2_30F_to_35F_compressor_duty_cycle': float_column,
'rhu2_35F_to_40F_compressor_duty_cycle': float_column,
'rhu2_40F_to_45F_compressor_duty_cycle': float_column,
'rhu2_45F_to_50F_compressor_duty_cycle': float_column,
'rhu2_50F_to_55F_compressor_duty_cycle': float_column,
'rhu2_55F_to_60F_compressor_duty_cycle': float_column,
'rhu2_less10F_compressor_duty_cycle': float_column,
'rhu2_10F_to_20F_compressor_duty_cycle': float_column,
'rhu2_20F_to_30F_compressor_duty_cycle': float_column,
'rhu2_30F_to_40F_compressor_duty_cycle': float_column,
'rhu2_40F_to_50F_compressor_duty_cycle': float_column,
'rhu2_50F_to_60F_compressor_duty_cycle': float_column,
}
df = pd.DataFrame(data, columns=columns)
return df
@pytest.fixture
def dataframes():
df1 = get_fake_output_df(10)
df2 = get_fake_output_df(10)
dfs = [df1, df2]
return dfs
@pytest.fixture
def combined_dataframe():
df = get_fake_output_df(100)
return df
def test_combine_output_dataframes(dataframes):
combined = combine_output_dataframes(dataframes)
assert combined.shape == (20, 200)
def test_compute_summary_statistics(combined_dataframe):
summary_statistics = compute_summary_statistics(combined_dataframe)
assert [len(s) for s in summary_statistics] == [
49, 49, 49, 49,
9105, 901, 9105, 901,
]
def test_compute_summary_statistics_advanced(combined_dataframe):
summary_statistics = compute_summary_statistics(combined_dataframe,
advanced_filtering=True)
assert [len(s) for s in summary_statistics] == [
49, 49, 49, 49, 49, 49, 49, 49,
9105, 901, 9105, 901, 9105, 901, 9105, 901,
]
def test_summary_statistics_to_csv(combined_dataframe):
summary_statistics = compute_summary_statistics(combined_dataframe)
_, fname = tempfile.mkstemp()
product_id = "FAKE"
stats_df = summary_statistics_to_csv(summary_statistics, fname, product_id)
assert isinstance(stats_df, pd.DataFrame)
stats_df_reread = pd.read_csv(fname)
import shutil
import GenFiles
import os
import zipfile
import tempfile
import pandas as pd
from collections import defaultdict
import csv
def remove_from_zip(zipfname, *filenames):
tempdir = tempfile.mkdtemp()
try:
tempname = os.path.join(tempdir, 'new.zip')
with zipfile.ZipFile(zipfname, 'r') as zipread:
with zipfile.ZipFile(tempname, 'w') as zipwrite:
for item in zipread.infolist():
if item.filename not in filenames:
data = zipread.read(item.filename)
zipwrite.writestr(item, data)
shutil.move(tempname, zipfname)
finally:
shutil.rmtree(tempdir)
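# Example usage (hypothetical archive path): rewrite package.zip without its old Sample_Map.zip member
# remove_from_zip('/path/to/package.zip', 'Sample_Map.zip')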
Zip_lat='/run/user/1000/gvfs/smb-share:server=kis-h2.si,share=kisdfs/ZIV/vol1/ZIV/VSI/JanaO/Rjava/ZipFiles/'
tempDir='/home/janao/Genotipi/SampleMaps/'
zipPackages = (filter(lambda x: x.endswith('.zip'), os.listdir(Zip_lat)))
os.chdir(tempDir)
#correct sample names and rezip files
for zipPackage in zipPackages:
shutil.copy(Zip_lat+zipPackage, tempDir) #copy zipPackage into temp dir
onePackage=GenFiles.genZipPackage(zipPackage)
errorIDs = onePackage.extractErrorNames() #extract Sample Names if they exist - they shouldn't be in the file
if errorIDs:
shutil.move(onePackage.name+'_Sample_Map.txt', 'Sample_Map.txt') #rename extracted SampleMap
onePackage.extractFinalReport() #extract Finalreport to replace the spurious names
for i in errorIDs:
os.system('sed -i "s|' +str(i[0])+ '|' + i[1] + '|g" ' + onePackage.name+"_FinalReport.txt") #errorIDs are tuples, replace first element with the second
os.system('sed -i "s|' +str(i[0])+ '|' + i[1] + '|g" '+'Sample_Map.txt')
#remove old Sample_Map and FinalReport from the zip archive and put the new one into the archive
#zip_deflated to compress the zip (reduce in size)
with zipfile.ZipFile(onePackage.name+'_FinalReport.zip', 'w', zipfile.ZIP_DEFLATED) as myzip:
myzip.write(onePackage.name+'_FinalReport.txt') #create new FinalReport zip
with zipfile.ZipFile('Sample_Map.zip', 'w', zipfile.ZIP_DEFLATED) as myzip:
myzip.write('Sample_Map.txt') #create new Sample_Map.zip
remove_from_zip(onePackage.zipname, onePackage.name+'_FinalReport.zip')
remove_from_zip(onePackage.zipname, 'Sample_Map.zip')
with zipfile.ZipFile(onePackage.zipname, 'a', zipfile.ZIP_DEFLATED) as z:
z.write(onePackage.name+'_FinalReport.zip')
with zipfile.ZipFile(onePackage.zipname, 'a', zipfile.ZIP_DEFLATED) as z:
z.write('Sample_Map.zip')
os.remove(onePackage.name+'_FinalReport.zip') #remove temp extracted FInalReports (with wrong names)
os.remove(onePackage.name+'_FinalReport.txt')
os.remove('Sample_Map.txt')
os.remove('Sample_Map.zip')
else: #if no errorIDs are found, remove the zip package thath you've just copied
os.remove(zipPackage)
os.remove(onePackage.name+'_Sample_Map.txt')
######################################################################
######################################################################
#this is to check whether all the names in the files got corrected right
tempDir='/home/janao/Genotipi/SampleMaps/'
os.chdir(tempDir)
AllGenInd = []
zipPackages = (filter(lambda x: x.endswith('.zip'), os.listdir(tempDir)))
for zipPackage in zipPackages:
#shutil.copy(Zip_lat+zipPackage, tempDir)
onePackage=GenFiles.genZipPackage(zipPackage)
onePackage.extractSampleMap()
table=pd.read_table(onePackage.name + '_Sample_Map.txt')
AllGenInd += [(onePackage.name,x) for x in table['ID']]
os.remove(onePackage.name + '_Sample_Map.txt')
##################################################################################3
##################################################################################
#check how many IDs not found in pedigree (sequences)
RJ_IDSeq="/home/janao/Genotipi/Genotipi_CODES/Rjave_seq_ID.csv"
Rj_IDSeq_Dict = defaultdict()
with open(RJ_IDSeq, 'rb') as IDSeq:
reader = csv.reader(IDSeq, delimiter=',')
for line in reader:
Rj_IDSeq_Dict[line[0]] = line[1:]
lala = AllGenInd
AllGenIDs = [x[1] for x in AllGenInd]
AllGenIDs = pd.DataFrame(AllGenIDs)
AllGenIDs.to_csv(tempDir+'/AllGenInd.csv', sep=",")
All = pd.read_csv('AllGenInd.csv')
AllGenID = [] # all individuals from the new RJ FinalReports
errorIDs = []
for ind in [x.upper() for x in All['0']]:
try:
AllGenID.append((Rj_IDSeq_Dict.get(ind)[0]))
except:
errorIDs.append(ind)
############################################################################
############################################################################
#the erroneous IDs inserted by GeneSeek
replaceIDs = [('SI4574059','SI04574059'),('SI84048801','SI84048802'),('SI4384195','SI04384195'),('Si24289407','SI24289407')]
spPackages =[]
for i in AllGenInd:
for errorID in errorIDs:
if errorID in i:
spPackages.append(i)
spPackages=list(set(spPackages))
zipErrorPackages=[i[0]+'.zip' for i in spPackages]
zipErrorPackages=['Matija_Rigler_BOVGP4V01-2_20160926-2.zip'] #not all capitals
#replace only in this
for zipPackage in zipErrorPackages:
shutil.copy(Zip_lat+zipPackage, tempDir) #copy zipPackage into temp dir
onePackage=GenFiles.genZipPackage(zipPackage)
errorIDs = onePackage.extractErrorNames() #extract Sample Names if they exist - they shouldn't be in the file
if errorIDs:
shutil.move(onePackage.name+'_Sample_Map.txt', 'Sample_Map.txt') #rename extracted SampleMap
onePackage.extractFinalReport() #extract Finalreport to replace the spurious names
for i in errorIDs:
os.system('sed -i "s|' +str(i[0])+ '|' + i[1] + '|g" ' + onePackage.name+"_FinalReport.txt") #errorIDs are tuples, replace first element with the second
os.system('sed -i "s|' +str(i[0])+ '|' + i[1] + '|g" '+'Sample_Map.txt')
#replace spurious IDs
for i in replaceIDs:
os.system('sed -i "s|' +i[0]+ '|' + i[1] + '|g" ' + onePackage.name+"_FinalReport.txt") #errorIDs are tuples, replace first element with the second
os.system('sed -i "s|' +i[0]+ '|' + i[1] + '|g" '+'Sample_Map.txt')
#remove old Sample_Map and FinalReport from the zip archive and put the new one into the archive
#zip_deflated to compress the zip (reduce in size)
with zipfile.ZipFile(onePackage.name+'_FinalReport.zip', 'w', zipfile.ZIP_DEFLATED) as myzip:
myzip.write(onePackage.name+'_FinalReport.txt') #create new FinalReport zip
with zipfile.ZipFile('Sample_Map.zip', 'w', zipfile.ZIP_DEFLATED) as myzip:
myzip.write('Sample_Map.txt') #create new Sample_Map.zip
remove_from_zip(onePackage.zipname, onePackage.name+'_FinalReport.zip')
remove_from_zip(onePackage.zipname, 'Sample_Map.zip')
with zipfile.ZipFile(onePackage.zipname, 'a', zipfile.ZIP_DEFLATED) as z:
z.write(onePackage.name+'_FinalReport.zip')
with zipfile.ZipFile(onePackage.zipname, 'a', zipfile.ZIP_DEFLATED) as z:
z.write('Sample_Map.zip')
os.remove(onePackage.name+'_FinalReport.zip') #remove temp extracted FInalReports (with wrong names)
os.remove(onePackage.name+'_FinalReport.txt')
os.remove('Sample_Map.txt')
os.remove('Sample_Map.zip')
else: #if no errorIDs are found, remove the zip package thath you've just copied
os.remove(zipPackage)
os.remove(onePackage.name+'_Sample_Map.txt')
#after this check again for the errorIDs
SQLGen = | pd.read_csv('/home/janao/Genotipi/AllGen_27012017.csv') | pandas.read_csv |
"""
Basic plotting helpers.
"""
from functools import partial
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mne.time_frequency.multitaper import psd_array_multitaper
from mne.time_frequency.psd import psd_array_welch
from mne.viz.utils import _convert_psds
from scipy.stats import circmean, circstd, sem
from statistical_testing import get_p_values
POLAR_XTICKS = np.pi / 180.0 * np.array([0, 90, 180, 270])
POLAR_XTICKLABELS = ["0", r"$\pi/2$", r"$-\pi$ = $\pi$", r"$-\pi/2$"]
def plot_spectrum(
data,
method="welch",
units="Hz",
title="",
spectrum_window_size=0.5,
alpha=1.0,
cmap=None,
legend=True,
logx=False,
logy=True,
ax=None,
):
"""
Plot power spectrum estimated using Welch method with Hamming window or
multi-taper method.
:param data: data for PSD estimation
:type data: pd.DataFrame
:param method: method for estimating the power spectrum, `welch` or
`multitaper`
:type method: str
:param units: units of data
:type units: str
:param title: title
:type title: str
:param spectrum_window_size: window size for the Hann window, in seconds
only used in Welch method
:type spectrum_window_size: float
:param cmap: colormap for colors
:type cmap: str
:param legend: whether to display a legend
:type legend: bool
:param logx: whether to draw x axis in logarithmic scale
:type logx: bool
:param logy: whether to draw y axis in logarithmic scale
:type logy: bool
:param ax: axis to plot to; if None, will create
:type ax: `matplotlib.axes._axes.Axes`|None
"""
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
data = pd.DataFrame(data)
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm
class TestLookup:
def test_lookup_float(self, float_frame):
df = float_frame
rows = list(df.index) * len(df.columns)
cols = list(df.columns) * len(df.index)
with tm.assert_produces_warning(FutureWarning):
result = df.lookup(rows, cols)
expected = np.array([df.loc[r, c] for r, c in zip(rows, cols)])
tm.assert_numpy_array_equal(result, expected)
def test_lookup_mixed(self, float_string_frame):
df = float_string_frame
rows = list(df.index) * len(df.columns)
cols = list(df.columns) * len(df.index)
with tm.assert_produces_warning(FutureWarning):
result = df.lookup(rows, cols)
expected = np.array(
[df.loc[r, c] for r, c in zip(rows, cols)], dtype=np.object_
)
tm.assert_almost_equal(result, expected)
def test_lookup_bool(self):
df = DataFrame(
{
"label": ["a", "b", "a", "c"],
"mask_a": [True, True, False, True],
"mask_b": [True, False, False, False],
"mask_c": [False, True, False, True],
}
)
with tm.assert_produces_warning(FutureWarning):
df["mask"] = df.lookup(df.index, "mask_" + df["label"])
exp_mask = np.array(
[df.loc[r, c] for r, c in zip(df.index, "mask_" + df["label"])]
)
tm.assert_series_equal(df["mask"], Series(exp_mask, name="mask"))
assert df["mask"].dtype == np.bool_
def test_lookup_raises(self, float_frame):
with pytest.raises(KeyError, match="'One or more row labels was not found'"):
with tm.assert_produces_warning(FutureWarning):
float_frame.lookup(["xyz"], ["A"])
with pytest.raises(KeyError, match="'One or more column labels was not found'"):
with tm.assert_produces_warning(FutureWarning):
float_frame.lookup([float_frame.index[0]], ["xyz"])
with pytest.raises(ValueError, match="same size"):
with tm.assert_produces_warning(FutureWarning):
float_frame.lookup(["a", "b", "c"], ["a"])
def test_lookup_requires_unique_axes(self):
# GH#33041 raise with a helpful error message
df = DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "A"])
rows = [0, 1]
cols = ["A", "A"]
# homogeneous-dtype case
with pytest.raises(ValueError, match="requires unique index and columns"):
with tm.assert_produces_warning(FutureWarning):
df.lookup(rows, cols)
with pytest.raises(ValueError, match="requires unique index and columns"):
with tm.assert_produces_warning(FutureWarning):
df.T.lookup(cols, rows)
# heterogeneous dtype
df["B"] = 0
with pytest.raises(ValueError, match="requires unique index and columns"):
with tm.assert_produces_warning(FutureWarning):
df.lookup(rows, cols)
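# DataFrame.lookup is deprecated (exercised by test_lookup_deprecated below); the pandas docs
# suggest a factorize-based replacement along these lines (sketch, names follow the docstring example):
#
#     idx, cols = pd.factorize(df["col"])
#     values = df.reindex(cols, axis=1).to_numpy()[np.arange(len(df)), idx]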
def test_lookup_deprecated():
# GH#18262
df = DataFrame(
{"col": ["A", "A", "B", "B"], "A": [80, 23, np.nan, 22], "B": [80, 55, 76, 67]}
)
with tm.assert_produces_warning(FutureWarning):
    df.lookup(df.index, df["col"])
import geopandas as gpd
import numpy as np
import pandas as pd
from shapely.ops import cascaded_union
import os
#This script splits the global basins (in total: 57025) following the Pfafstetter rules until all basins are under 5000 km2
#<NAME>, October 2019
def trace_upstream_id(COMID,riv):
#function to trace the entire network upstream of river with ID equaling COMID
#riv: dict mapping each COMID to its immediate upstream COMIDs (see convert2dict_upid); COMID: ID of the river to trace upstream
if COMID not in riv:
return [COMID]
else:
list_up_id = [COMID]
for i in riv[COMID]:
list_up_id += trace_upstream_id(i,riv)
return list_up_id
def trace_interbasin(list_main,list_trib,riv):
idlist = list_main
for trib_id in list_trib:
idlist += trace_upstream_id(trib_id,riv)
return idlist
def to_list_up_id(x):
result = []
for c in ['up1','up2','up3','up4']:
if x[c]!=0:
result += [x[c]]
if len(result)==0:
result = np.nan
return result
def convert2dict_upid(df):
df['up_list'] = df.apply(to_list_up_id,axis=1)
df_tmp = df[['COMID','up_list']].dropna()
df_dict = dict(zip(df_tmp.COMID,df_tmp.up_list))
del df['up_list']
return df_dict
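# Sketch of the lookup structure these helpers exchange (COMID values are made up):
# convert2dict_upid(riv_df) -> {10: [11, 12], 11: [13]}
# trace_upstream_id(10, that_dict) -> [10, 11, 13, 12], i.e. the full upstream network of 10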
def read_all_rivers():
list_df = []
column_wanted = ['COMID','NextDownID','uparea','up1','up2','up3','up4','geometry']
path = '../../../MERIT/raster/cleaned/new_shapefiles/shapefile_props/level_01/'
for pfaf in range(1,9):
print('... read river network pfaf = %02d ...'%pfaf)
fn = os.path.join(path,'pfaf_%02d_riv_3sMERIT_props.shp'%pfaf)
df_tmp = gpd.read_file(fn)[column_wanted]
list_df.append(df_tmp)
return pd.concat(list_df)
"""
@author <NAME>
@email <EMAIL>
"""
from __future__ import division
import pandas as pd
import numpy as np
import warnings
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import Lasso, LogisticRegression
from scipy.stats import kurtosis, skew
class FeatureTransformation(object):
"""
A new transformation need to
- Implement __init__ if needed and call super init
- Implement _fit_special_process
- Implement _transform_special_process
"""
def __init__(self, feature_name=None, shadow=False):
self._name = feature_name
self.fitted = False
self._process_name = None
self._shadow = shadow
@property
def feature_name(self):
return self._name
@property
def process_name(self):
return self._process_name
@property
def shadow(self):
return self._shadow
def _fit_special_process(self, data, target=None):
raise NotImplementedError()
def fit(self, data, target=None):
# Check feature is in data
if self._name not in data.columns:
raise ValueError("Feature " + self._name + " is not in the dataset")
# Call Transformation specific process
self._fit_special_process(data=data, target=target)
# Set fitted
self.fitted = True
def fit_transform(self, data, target=None):
# Call fit
self.fit(data=data, target=target)
# Call transform
return self.transform(data)
def _transform_special_process(self, data):
raise NotImplementedError()
def transform(self, data):
if self.fitted is not True:
raise NotFittedError("Transformation is not fitted yet.")
# Check if shadow shuffling process has to be used
if self._shadow:
temp = self._transform_special_process(data)
if "dataframe" in str(type(temp)).lower():
z = np.array(temp)
idx = np.arange(len(z))
np.random.shuffle(idx)
return pd.DataFrame(z[idx],
columns=temp.columns,
index=temp.index)
else:
z = np.array(temp)
np.random.shuffle(z)
return pd.Series(z, name="shadow_" + self._name, index=temp.index)
else:
return self._transform_special_process(data)
class IdentityTransformation(FeatureTransformation):
def __init__(self, feature_name=None, shadow=False):
# Call super
super(IdentityTransformation, self).__init__(feature_name=feature_name, shadow=shadow)
self._process_name = "Identity"
def _fit_special_process(self, data, target=None):
pass
def _transform_special_process(self, data):
return data[self._name]
class ShadowTransformation(FeatureTransformation):
def __init__(self, feature_name=None):
# Call super
super(ShadowTransformation, self).__init__(feature_name)
self._process_name = "Shadow"
def _fit_special_process(self, data, target=None):
pass
def _transform_special_process(self, data):
z = data[[self._name]].copy()
vals = np.array(data[self._name])
np.random.shuffle(vals)
z[self._name] = vals
return pd.Series(z[self._name], name="shadow_" + self._name, index=z.index)
class ExpoTransformation(FeatureTransformation):
def __init__(self, feature_name=None):
# Call super
super(ExpoTransformation, self).__init__(feature_name)
self._process_name = "Exponential"
def _fit_special_process(self, data, target=None):
pass
def _transform_special_process(self, data):
return np.exp(data[self._name])
class PowerTransformation(FeatureTransformation):
def __init__(self, feature_name=None, power=2):
# Call super
super(PowerTransformation, self).__init__(feature_name)
self.power = power
self._process_name = "Power_" + str(power)
def _fit_special_process(self, data, target=None):
pass
def _transform_special_process(self, data):
return np.power(data[self._name], self.power)
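# Minimal sketch of a custom transformation following the recipe in the FeatureTransformation
# docstring; the class name and the log1p choice are illustrative, not part of the original module.
class Log1pTransformation(FeatureTransformation):
    def __init__(self, feature_name=None):
        # Call super
        super(Log1pTransformation, self).__init__(feature_name)
        self._process_name = "Log1p"

    def _fit_special_process(self, data, target=None):
        # nothing to learn for a fixed log transform
        pass

    def _transform_special_process(self, data):
        # log(1 + x) keeps zero values finite
        return np.log1p(data[self._name])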
class InversePowerTransformation(FeatureTransformation):
def __init__(self, feature_name=None, power=1, epsilon=1e-5):
# Call super
super(InversePowerTransformation, self).__init__(feature_name)
self.power = power
self.epsilon = epsilon
self._process_name = "Inverse_Power_" + str(power)
def _fit_special_process(self, data, target=None):
# Check for zeros
if pd.Series(data[self._name] == 0).sum() > 0:
    warnings.warn("Feature " + self._name + " contains zero values; the epsilon offset will be used")
"""
Functions about routes.
"""
from collections import OrderedDict
from typing import Optional, Iterable, List, Dict, TYPE_CHECKING
import json
import geopandas as gp
import pandas as pd
import numpy as np
import shapely.geometry as sg
import shapely.ops as so
import folium as fl
from . import constants as cs
from . import helpers as hp
# Help mypy but avoid circular imports
if TYPE_CHECKING:
from .feed import Feed
def compute_route_stats_0(
trip_stats_subset: pd.DataFrame,
headway_start_time: str = "07:00:00",
headway_end_time: str = "19:00:00",
*,
split_directions: bool = False,
) -> pd.DataFrame:
"""
Compute stats for the given subset of trips stats (of the form output by the
function :func:`.trips.compute_trip_stats`).
If ``split_directions``, then separate the stats by trip direction (0 or 1).
Use the headway start and end times to specify the time period for computing
headway stats.
Return a DataFrame with the columns
- ``'route_id'``
- ``'route_short_name'``
- ``'route_type'``
- ``'direction_id'``
- ``'num_trips'``: number of trips on the route in the subset
- ``'num_trip_starts'``: number of trips on the route with
nonnull start times
- ``'num_trip_ends'``: number of trips on the route with nonnull
end times that end before 23:59:59
- ``'is_loop'``: 1 if at least one of the trips on the route has
its ``is_loop`` field equal to 1; 0 otherwise
- ``'is_bidirectional'``: 1 if the route has trips in both
directions; 0 otherwise
- ``'start_time'``: start time of the earliest trip on the route
- ``'end_time'``: end time of latest trip on the route
- ``'max_headway'``: maximum of the durations (in minutes)
between trip starts on the route between
``headway_start_time`` and ``headway_end_time`` on the given
dates
- ``'min_headway'``: minimum of the durations (in minutes)
mentioned above
- ``'mean_headway'``: mean of the durations (in minutes)
mentioned above
- ``'peak_num_trips'``: maximum number of simultaneous trips in
service (for the given direction, or for both directions when
``split_directions==False``)
- ``'peak_start_time'``: start time of first longest period
during which the peak number of trips occurs
- ``'peak_end_time'``: end time of first longest period during
which the peak number of trips occurs
- ``'service_duration'``: total of the duration of each trip on
the route in the given subset of trips; measured in hours
- ``'service_distance'``: total of the distance traveled by each
trip on the route in the given subset of trips; measured in
whatever distance units are present in ``trip_stats_subset``;
contains all ``np.nan`` entries if ``feed.shapes is None``
- ``'service_speed'``: service_distance/service_duration;
measured in distance units per hour
- ``'mean_trip_distance'``: service_distance/num_trips
- ``'mean_trip_duration'``: service_duration/num_trips
If not ``split_directions``, then remove the
direction_id column and compute each route's stats,
except for headways, using its trips running in both directions.
In this case, (1) compute max headway by taking the max of the
max headways in both directions; (2) compute mean headway by
taking the weighted mean of the mean headways in both
directions.
If ``trip_stats_subset`` is empty, return an empty DataFrame.
Raise a ValueError if ``split_directions`` and no non-NaN
direction ID values present
"""
if trip_stats_subset.empty:
return pd.DataFrame()
# Convert trip start and end times to seconds to ease calculations below
f = trip_stats_subset.copy()
f[["start_time", "end_time"]] = f[["start_time", "end_time"]].applymap(
hp.timestr_to_seconds
)
headway_start = hp.timestr_to_seconds(headway_start_time)
headway_end = hp.timestr_to_seconds(headway_end_time)
def compute_route_stats_split_directions(group):
# Take this group of all trips stats for a single route
# and compute route-level stats.
d = OrderedDict()
d["route_short_name"] = group["route_short_name"].iat[0]
d["route_type"] = group["route_type"].iat[0]
d["num_trips"] = group.shape[0]
d["num_trip_starts"] = group["start_time"].count()
d["num_trip_ends"] = group.loc[
group["end_time"] < 24 * 3600, "end_time"
].count()
d["is_loop"] = int(group["is_loop"].any())
d["start_time"] = group["start_time"].min()
d["end_time"] = group["end_time"].max()
# Compute max and mean headway
stimes = group["start_time"].values
stimes = sorted(
[stime for stime in stimes if headway_start <= stime <= headway_end]
)
headways = np.diff(stimes)
if headways.size:
d["max_headway"] = np.max(headways) / 60 # minutes
d["min_headway"] = np.min(headways) / 60 # minutes
d["mean_headway"] = np.mean(headways) / 60 # minutes
else:
d["max_headway"] = np.nan
d["min_headway"] = np.nan
d["mean_headway"] = np.nan
# Compute peak num trips
active_trips = hp.get_active_trips_df(group[["start_time", "end_time"]])
times, counts = active_trips.index.values, active_trips.values
start, end = hp.get_peak_indices(times, counts)
d["peak_num_trips"] = counts[start]
d["peak_start_time"] = times[start]
d["peak_end_time"] = times[end]
d["service_distance"] = group["distance"].sum()
d["service_duration"] = group["duration"].sum()
return pd.Series(d)
def compute_route_stats(group):
d = OrderedDict()
d["route_short_name"] = group["route_short_name"].iat[0]
d["route_type"] = group["route_type"].iat[0]
d["num_trips"] = group.shape[0]
d["num_trip_starts"] = group["start_time"].count()
d["num_trip_ends"] = group.loc[
group["end_time"] < 24 * 3600, "end_time"
].count()
d["is_loop"] = int(group["is_loop"].any())
d["is_bidirectional"] = int(group["direction_id"].unique().size > 1)
d["start_time"] = group["start_time"].min()
d["end_time"] = group["end_time"].max()
# Compute headway stats
headways = np.array([])
for direction in [0, 1]:
stimes = group[group["direction_id"] == direction]["start_time"].values
stimes = sorted(
[stime for stime in stimes if headway_start <= stime <= headway_end]
)
headways = np.concatenate([headways, np.diff(stimes)])
if headways.size:
d["max_headway"] = np.max(headways) / 60 # minutes
d["min_headway"] = np.min(headways) / 60 # minutes
d["mean_headway"] = np.mean(headways) / 60 # minutes
else:
d["max_headway"] = np.nan
d["min_headway"] = np.nan
d["mean_headway"] = np.nan
# Compute peak num trips
active_trips = hp.get_active_trips_df(group[["start_time", "end_time"]])
times, counts = active_trips.index.values, active_trips.values
start, end = hp.get_peak_indices(times, counts)
d["peak_num_trips"] = counts[start]
d["peak_start_time"] = times[start]
d["peak_end_time"] = times[end]
d["service_distance"] = group["distance"].sum()
d["service_duration"] = group["duration"].sum()
return pd.Series(d)
import numpy as np
import matplotlib.pyplot as plt
import pyvista as pv
import pandas as pd
from skimage import measure
from scipy.integrate import simps
from scipy.interpolate import griddata
import geopandas as gpd
from shapely.geometry import MultiPolygon, Polygon
from zmapio import ZMAPGrid
def poly_area(x,y):
return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
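# poly_area implements the shoelace formula; e.g. for a unit square:
# poly_area(np.array([0, 1, 1, 0]), np.array([0, 0, 1, 1])) -> 1.0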
class Surface:
def __init__(self, **kwargs):
self.x = kwargs.pop('x',None)
self.y = kwargs.pop('y',None)
self.z = kwargs.pop('z',None)
self.crs = kwargs.pop('crs',4326)
#Properties
@property
def x(self):
return self._x
@x.setter
def x(self,value):
if value is not None:
assert isinstance(value,np.ndarray)
assert value.ndim == 2
self._x = value
@property
def y(self):
return self._y
@y.setter
def y(self,value):
if value is not None:
assert isinstance(value,np.ndarray)
assert value.ndim == 2
self._y = value
@property
def z(self):
return self._z
@z.setter
def z(self,value):
if value is not None:
assert isinstance(value,np.ndarray)
assert value.ndim == 2
self._z = value
@property
def crs(self):
return self._crs
@crs.setter
def crs(self,value):
assert isinstance(value,(int,str,type(None))), f"{type(value)} not accepted. crs must be int or str. Example 'EPSG:3117'"
if isinstance(value,int):
value = f'EPSG:{value}'
elif isinstance(value,str):
assert value.startswith('EPSG:'), 'if crs is string must starts with EPSG:. If integer must be the Coordinate system reference number EPSG http://epsg.io/'
self._crs = value
def contour(self,ax=None,**kwargs):
#Create the Axex
cax= ax or plt.gca()
return cax.contour(self.x,self.y,self.z,**kwargs)
def contourf(self,ax=None,**kwargs):
#Create the Axex
cax= ax or plt.gca()
return cax.contourf(self.x,self.y,self.z,**kwargs)
def structured_surface_vtk(self):
#Get a Pyvista Object StructedGrid
grid = pv.StructuredGrid(self.x, self.y, self.z).elevation()
return grid
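# Example (assumed inputs; x, y and z must be 2-D meshgrid-style arrays):
# xx, yy = np.meshgrid(np.linspace(0, 1, 50), np.linspace(0, 1, 50))
# surf = Surface(x=xx, y=yy, z=np.sin(3 * xx) * np.cos(3 * yy), crs=3117)
# surf.structured_surface_vtk().plot()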
def get_contours_bound(self,levels=None,zmin=None,zmax=None,n=10):
#define levels
if levels is not None:
assert isinstance(levels,(np.ndarray,list))
levels = np.atleast_1d(levels)
assert levels.ndim==1
else:
zmin = zmin if zmin is not None else np.nanmin(self.z)
zmax = zmax if zmax is not None else np.nanmax(self.z)
levels = np.linspace(zmin,zmax,n)
xmax = np.nanmax(self.x)
ymax = np.nanmax(self.y)
xmin = np.nanmin(self.x)
ymin = np.nanmin(self.y)
#iterate over levels levels
contours = self.structured_surface_vtk().contour(isosurfaces=levels.tolist())
contours.points[:,2] = contours['Elevation']
df = pd.DataFrame(contours.points, columns=['x','y','z'])
#Organize the points according to their angle with respect to the centroid. This is done so
#that the bounds can be plotted continuously.
list_df_sorted = []
for i in df['z'].unique():
df_z = df.loc[df['z']==i,['x','y','z']]
centroid = df_z[['x','y']].mean(axis=0).values
df_z[['delta_x','delta_y']] = df_z[['x','y']] - centroid
df_z['angle'] = np.arctan2(df_z['delta_y'],df_z['delta_x'])
df_z.sort_values(by='angle', inplace=True)
list_df_sorted.append(df_z)
return pd.concat(list_df_sorted, axis=0)
def get_contours_area_bounds(self,levels=None,n=10,zmin=None,zmax=None,c=2.4697887e-4):
contours = self.get_contours_bound(levels=levels,zmin=zmin,zmax=zmax,n=n)
area_dict= {}
for i in contours['z'].unique():
poly = contours.loc[contours['z']==i,['x','y']]
area = poly_area(poly['x'],poly['y'])
area_dict.update({i:area*c})
return pd.DataFrame.from_dict(area_dict, orient='index', columns=['area'])
def get_contours_area_mesh(self,levels=None,n=10,zmin=None,zmax=None,c=2.4697887e-4):
zmin = zmin if zmin is not None else np.nanmin(self.z)
zmax = zmax if zmax is not None else np.nanmax(self.z)
if levels is not None:
assert isinstance(levels,(np.ndarray,list))
levels = np.atleast_1d(levels)
assert levels.ndim==1
else:
levels = np.linspace(zmin,zmax,n)
dif_x = np.diff(self.x,axis=1).mean(axis=0)
dif_y = np.diff(self.y,axis=0).mean(axis=1)
dxx, dyy = np.meshgrid(dif_x,dif_y)
area_dict = {}
for i in levels:
z = self.z.copy()
z[(z<i)|(z>zmax)|(z<zmin)] = np.nan
z = z[1:,1:]
a = dxx * dyy * ~np.isnan(z) * c
area_dict.update({i:a.sum()})
return pd.DataFrame.from_dict(area_dict, orient='index', columns=['area'])
def get_contours(self,levels=None,zmin=None,zmax=None,n=10):
#define levels
if levels is not None:
assert isinstance(levels,(np.ndarray,list))
levels = np.atleast_1d(levels)
assert levels.ndim==1
else:
zmin = zmin if zmin is not None else np.nanmin(self.z)
zmax = zmax if zmax is not None else np.nanmax(self.z)
levels = np.linspace(zmin,zmax,n)
zz = self.z
xmax = np.nanmax(self.x)
ymax = np.nanmax(self.y)
xmin = np.nanmin(self.x)
ymin = np.nanmin(self.y)
#iterate over levels levels
data = pd.DataFrame()
i = 0
for level in levels:
contours = measure.find_contours(zz,level)
if contours == []:
continue
else:
for contour in contours:
level_df = pd.DataFrame(contour, columns=['y','x'])
level_df['level'] = level
level_df['n'] = i
data = data.append(level_df,ignore_index=True)
i += 1
if not data.empty:
#re scale
data['x'] = (data['x']/zz.shape[1]) * (xmax - xmin) + xmin
data['y'] = (data['y']/zz.shape[0]) * (ymax - ymin) + ymin
return data
def get_contours_gdf(self,levels=None,zmin=None,zmax=None,n=10, crs="EPSG:4326"):
#define levels
if levels is not None:
assert isinstance(levels,(np.ndarray,list))
levels = np.atleast_1d(levels)
assert levels.ndim==1
else:
zmin = zmin if zmin is not None else np.nanmin(self.z)
zmax = zmax if zmax is not None else np.nanmax(self.z)
levels = np.linspace(zmin,zmax,n)
zz = self.z
xmax = np.nanmax(self.x)
ymax = np.nanmax(self.y)
xmin = np.nanmin(self.x)
ymin = np.nanmin(self.y)
#iterate over levels levels
data = gpd.GeoDataFrame()
i = 0
for level in levels:
poly_list =[]
contours = measure.find_contours(zz,level)
if contours == []:
continue
else:
for contour in contours:
level_df = pd.DataFrame(contour, columns=['y','x'])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 20 16:39:12 2021
@author: jackreid
"""
import pandas as pd
import numpy as np
import csv
import matplotlib.pyplot as plt
from screeninfo import get_monitors
#Set filepath of data
Location = 'Angola'
filepaths = {'Indonesia' : '/home/jackreid/Documents/School/Research/Space Enabled/Code/Decisions/Auxilary Files/SummaryGraphs/Indonesia/Indonesia_graphsV3.csv',
'Angola': '/home/jackreid/Documents/School/Research/Space Enabled/Code/Decisions/Auxilary Files/SummaryGraphs/Luanda/Luanda_graphs.csv',
'Queretaro' : '/home/jackreid/Documents/School/Research/Space Enabled/Code/Decisions/Auxilary Files/SummaryGraphs/Querétaro/Queretaro_graphs.csv',
'Rio de Janeiro': '/home/jackreid/Documents/School/Research/Space Enabled/Code/Decisions/Auxilary Files/SummaryGraphs/Rio de Janeiro/Rio_graphs_Date.csv',
'Metropolitana': '/home/jackreid/Documents/School/Research/Space Enabled/Code/Decisions/Auxilary Files/SummaryGraphs/Santiago/Metropolitana_graphs.csv'}
transit_title = {'Indonesia' : 'transit_stations_mob',
'Angola': 'nat_transit_mob',
'Queretaro' : 'loc_transit_mob',
'Rio de Janeiro': 'transit_stations_mob',
'Metropolitana': 'transit_stations_mob'}
filepath = filepaths[Location]
#Get screen resolution, used for sizing the graphs later on
for m in get_monitors():
print(str(m))
my_dpi = m.width/(m.width_mm*0.0393701)
#Extract data from the csv
datalist = []
with open(filepath) as csvfile:
readCSV1 = csv.DictReader(csvfile, delimiter=',')
for row in readCSV1:
newrow = dict()
for entry in row.keys():
if row[entry]:
if entry != 'Closure Policy Name' and entry not in ['Date_Name','Policy_Name']:
newrow[entry] = float(row[entry])
else:
newrow[entry] = np.nan
datalist.append(newrow)
#Convert data into a DataFrame for plotting purposes
df_data = pd.DataFrame(datalist)
# ********************************************************************************** #
# #
# Project: FastClassAI workbecnch #
# #
# Author: <NAME> #
# Contact: <EMAIL> #
# #
# This notebook is a part of Skin AanaliticAI development kit, created #
# for evaluation of public datasets used for skin cancer detection with #
# large number of AI models and data preparation pipelines. #
# #
# License: MIT #
# Copyright (C) 2021.01.30 <NAME> #
# https://opensource.org/licenses/MIT #
# #
# ********************************************************************************** #
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os # allow changing, and navigating files and folders,
import sys
import re # module to use regular expressions,
import glob # lists names in folders that match Unix shell patterns
import random # functions that use and generate random numbers
import pickle
import numpy as np # support for multi-dimensional arrays and matrices
import pandas as pd # library for data manipulation and analysis
import seaborn as sns # advance plots, for statistics,
import matplotlib as mpl # to get some basif functions, heping with plot mnaking
import scipy.cluster.hierarchy as sch
import matplotlib.pyplot as plt # for making plots,
# project functions
from src.utils.FastClassAI_skilearn_tools import prepare_list_with_subset_collection_composition_list
from src.utils.cnn_transfer_learning_tools import CNN_GridSearch
from src.utils.cnn_transfer_learning_tools import create_keras_two_layer_dense_model
from src.utils.cnn_transfer_learning_tools import plot_NN_loss_acc
# Function, .................................................
def train_and_test_cnn_tranfer_learning_models(*,
# names
run_ID, # str, Unique ID added to name of each file to allow running and saving similar module create with different parameters or different data
dataset_name, # str, global, provided by the wrapper function,
dataset_variant, # str, global, provided by the wrapper function,
module_name, # str, global, provided by the wrapper function,
# define input data,
subset_collection_names, # list, anything that will allow you to identify which subset and run you are using
subset_collection_composition_dict, # dict of DataFrames keyed by subset collection name (same entries as subset_collection_names), returned by prepare_list_with_subset_collection_composition_list
data_subsets_role, # dict, with names of subsets used as train, valid and test + in case you wish to use
# model parameters,
method_name, # str, keyword in the function {knn, svm, logreg, dt, rf}
grid, # ParameterGrid object, with parameters for a given function,
# model selection cycles,
models_selected_at_each_cycle = 0.3, # int or float, how many models with the best performance will be selected and trained with another round of training and with the next subset collection
include_method_variant_with_selection = True, # bool, if True, top models_selected_at_each_cycle with different model variants will be selected for the next cycle
include_random_nr_with_selection = False, # bool, if True, top models_selected_at_each_cycle with different random nr will be selected for the next cycle
sort_models_by = "model_acc_valid", # str {"model_acc_valid"}
# saving
save_path, # str, eg PATH_results from configs
save_partial_results=True, # bool, if True, it will save model results at each iteration
# pipe variables, # same for all cycles,
class_encoding, # dict, global key: class_name, value:int
class_decoding, # dict, global key: int, value:class_name
train_proportion=0.7, # used only if validation datasets is not specified,
dropout_value=None, # str, from configs,
unit_test=False,
# other,
plot_history=True, # plots accuracy and error over epohs for each nn
verbose=True
):
# ** / options
if verbose==True:
display_partial_results=True
else:
display_partial_results=False
# create path for results wiht all options,
path_results = os.path.join(save_path, f"{method_name}__{dataset_name}__{dataset_variant}")
try:
os.mkdir(path_results)
if verbose==True:
print(f"\n Created: {path_results}\n")
else:
pass
except:
pass
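# e.g. with method_name="cnn", dataset_name="skin", dataset_variant="cropped" (illustrative values
# only), results from all cycles are collected under <save_path>/cnn__skin__cropped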
# ** / grid search
run_names = list()
for cycle_nr, subset_collection_name in enumerate(subset_collection_names):
print(f"\n - - - CYCLE: {cycle_nr} - - -\n")
# ...............................................
# get df with path and filenames to load
subset_collection_composition_df = subset_collection_composition_dict[subset_collection_name]
# set unique run name,
run_name = f"{subset_collection_name}__{run_ID}"
run_names.append(run_name)
# set name added to each saved file with results from that cycle
file_name_fingerprint = f'{method_name}__{dataset_name}__{dataset_variant}__{module_name}__{run_name}'
# set role for each subset
selected_data_subsets_role = data_subsets_role[subset_collection_name].copy()
        # check whether a validation subset is given by name, or as a fraction of the train data
if isinstance(selected_data_subsets_role["valid"], float):
train_proportion_applied = 1-selected_data_subsets_role["valid"]
selected_data_subsets_role["valid"] = None
else:
            train_proportion_applied = train_proportion # used only if the valid role is None
# ...............................................
# find grid with parameters
        if cycle_nr==0:
            # in cycle 0 the grid is provided externally
            cycle_grid = grid
        else:
            # collect the parameters of the best-performing models from the previous
            # cycle and extract the params of the top nr of them; optionally the
            # model variant and/or random seed are included in the selection
            sort_by = "model_acc_valid"
# collect features you want to use to sort model results and get top of each of them
features_used_to_group_models = ["method", "dataset_name", "dataset_variant", "module"]
if include_random_nr_with_selection==True:
features_used_to_group_models.append("random_state_nr")
else:
pass
if include_method_variant_with_selection:
features_used_to_group_models.append("method_variant")
else:
pass
# add these features to df, with the model results as one column
for fi, feature in enumerate(features_used_to_group_models):
if fi==0:
composite_feature = results_from_last_cycle.loc[:, feature].values.tolist()
else:
composite_feature = [f"{x}__{y}" for (x,y) in zip(composite_feature,
results_from_last_cycle.loc[:, feature].values.tolist())]
results_from_last_cycle["method_full_name"] = composite_feature
# find best performing models in each group and sort them
method_groups = results_from_last_cycle.method_full_name.unique().tolist()
best_methods_IDs = list()
for ii, mg in enumerate(method_groups):
# subset summary_df for each method group
df_subset = results_from_last_cycle.loc[ results_from_last_cycle.method_full_name==mg, :]
df_subset = df_subset.sort_values(sort_by, ascending=False)
df_subset.reset_index(inplace=True, drop=True)
# find how many models will be selected for the next cycle,
if models_selected_at_each_cycle<1 and models_selected_at_each_cycle>0:
mnr = int(np.ceil(df_subset.shape[0]*models_selected_at_each_cycle))
elif models_selected_at_each_cycle==0:
mnr = 1
else:
mnr = models_selected_at_each_cycle
                # guard against rare cases where rounding would select zero models
if mnr==0:
mnr=1
else:
pass
# find top n models in each
best_methods_IDs.extend(df_subset.model_ID.values[0:mnr].tolist()) #this will extend the list by each nr of id numbers
# create new grid
cycle_grid=list()
for gidx in best_methods_IDs:
                cycle_grid.append(model_parameter_list[gidx]['params']) # 'params' is the key under which CNN_GridSearch stores each model's parameter set
# train models
results_list, model_predictions_dict, model_parameter_list, model_history_dict = CNN_GridSearch(
# input data
method = method_name,
grid = cycle_grid,
file_namepath_table = subset_collection_composition_df,
                # names to save, used to identify input data & results
dataset_name = dataset_name,
dataset_variant = dataset_variant,
module_name = module_name,
run_name = run_name,
# names used to search for subset names and save results
class_encoding = class_encoding,
class_decoding = class_decoding,
dropout_value = dropout_value,
                train_subset_name = selected_data_subsets_role["train"], # the training subset does not have to be called "train" in the files
                valid_subset_name = selected_data_subsets_role["valid"], # if None, train_proportion will be used
                test_subset_name_list = selected_data_subsets_role["test"], # must correspond to subset_name in file_namepath_table; if None, the list is simply shorter
                unit_test = unit_test,
                train_proportion = train_proportion_applied, # not used in this version; planned for a future version
# ... results and info,
store_predictions=True,
track_progres=display_partial_results,
plot_history=plot_history, # applied only if verbose==True
model_fit__verbose=0,
verbose=False
)
        # keep the results of this cycle for model selection in the next cycle
results_from_last_cycle = | pd.DataFrame(results_list) | pandas.DataFrame |
import numpy as np
import pymc3 as pm
import pandas as pd
import pickle # used by save_checkpoint / load_checkpoint below
import wandb # used by load_checkpoint below
import yaml # used by load_config below
import warnings
from sklearn.cluster import k_means
from scipy.special import softmax, logsumexp, loggamma
from sklearn.metrics.pairwise import cosine_similarity
from .constants import *
import pkg_resources
# constants
#C=32
#M=3
#P=2
def dirichlet(node_name, a, shape, scale=1, testval = None):
# dirichlet reparameterized here because of stickbreaking bug
# https://github.com/pymc-devs/pymc3/issues/4733
X = pm.Gamma(f'gamma_{node_name}', mu = a, sigma = scale, shape = shape, testval = testval)
Y = pm.Deterministic(node_name, (X/X.sum(axis = (X.ndim-1))[...,None]))
return Y
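# Usage sketch (illustrative; C and J are placeholder dimensions): inside a model
# context, e.g. `with pm.Model(): theta = dirichlet('theta', a=np.ones(C), shape=(J, C))`,
# each row of the returned deterministic sums to 1, i.e. lies on the C-simplex.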
def load_config(config_fp):
# load the yaml file
with open(config_fp, 'r') as f:
config = yaml.safe_load(f)
print(f"Loaded configuration file {config_fp}")
# remove any parameters not applicable to selected data source
ds = config.pop('dataset')
ds[ds['dataset_sel']].update({'dataset_sel': ds['dataset_sel']})
# update dataset args to subsetted list
config.update({'dataset': ds[ds['dataset_sel']]})
## handle seeding
#config['dataset'].update({'data_rng': np.random.default_rng(config['dataset']['data_seed'])})
#config['model'].update({'model_rng': np.random.default_rng(config['model']['model_seed'])})
return config['dataset'], config['model'], config['pymc3']
def detect_naming_style(fp):
# first column must have type at minimum
df = pd.read_csv(fp, index_col=0, sep = None, engine = 'python')
naming_style = 'unrecognized'
# check if index is type style
if df.index.isin(mut96).any(): naming_style = 'type'
# check if index is type/subtype style
else:
df = df.reset_index()
df = df.set_index(list(df.columns[0:2]))
if df.index.isin(idx96).any(): naming_style = 'type/subtype'
assert naming_style == 'type' or naming_style == 'type/subtype', \
'Mutation type naming style could not be identified.\n'\
'\tExpected either two column type/subtype (ex. C>A,ACA) or\n'\
'\tsingle column type (ex A[C>A]A). See examples at COSMIC database.'
return naming_style
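# note: detect_naming_style distinguishes the single-column 'type' style (ex. A[C>A]A)
# from the two-column 'type/subtype' style (ex. C>A,ACA), as described in the assert above.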
def load_sigs(fp):
warnings.warn("load_sigs is deprecated, see Damuta class", DeprecationWarning)
naming_style = detect_naming_style(fp)
if naming_style == 'type':
# read in sigs
sigs = pd.read_csv(fp, index_col = 0, sep = None, engine = 'python').reindex(mut96)
# sanity check for matching mut96, should have no NA
sel = (~sigs.isnull()).all(axis = 1)
assert sel.all(), f'invalid signature definitions: null entry for types {list(sigs.index[~sel])}'
# convert to pcawg convention
sigs = sigs.set_index(idx96)
elif naming_style == 'type/subtype':
# read in sigs
sigs = pd.read_csv(fp, index_col = (0,1), header=0).reindex(idx96)
# sanity check for idx, should have no NA
sel = (~sigs.isnull()).all(axis = 1)
assert sel.all(), f'invalid signature definitions: null entry for types {list(sigs.index[~sel])}'
# check colsums are 1
sel = np.isclose(sigs.sum(axis=0), 1)
assert sel.all(), f'invalid signature definitions: does not sum to 1 in columns {list(sigs.columns[~sel])}'
    assert all(sigs.index == idx96) or all(sigs.index == mut96), 'signature definitions failed to be read correctly'
# force Jx96 and mut96 convention
sigs = sigs.T
sigs.columns = mut96
return sigs
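# note: load_sigs returns signatures as rows (one row per signature) over the 96
# mutation-type columns, following the mut96 ordering imported from .constants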
def load_counts(counts_fp):
warnings.warn("load_counts is deprecated, see Damuta class", DeprecationWarning)
counts = pd.read_csv(counts_fp, index_col = 0, header = 0)[mut96]
assert counts.ndim == 2, 'Mutation counts failed to load. Check column names are mutation type (ex. A[C>A]A). See COSMIC database for more.'
assert counts.shape[1] == 96, f'Expected 96 mutation types, got {counts.shape[1]}'
return counts
def subset_samples(dataset, annotation, annotation_subset, sel_idx = 0):
# subset sample ids by matching to annotation_subset
# expect annotation_subset to be pd dataframe with ids as index
if annotation_subset is None:
return dataset, annotation
# stop string being auto cast to list
if type(annotation_subset) == str:
annotation_subset = [annotation_subset]
    if annotation.shape[1] > 1:
warnings.warn(f"More than one annotation is available per sample, selection index {sel_idx}", UserWarning)
# annotation ids should match sample ids
assert dataset.index.isin(annotation.index).any(), 'No sample ID matches found in dataset for the provided annotation'
    # reorder annotation (with gaps) to match dataset
annotation = annotation.reindex(dataset.index)
# partial matches allowed
sel = np.fromiter((map(any, zip(*[annotation[annotation.columns[sel_idx]].str.contains(x) for x in annotation_subset] ))), dtype = bool)
# type should appear in the type column of the lookup
assert sel.any(), 'Cancer type subsetting yielded no selection. Check keywords?'
dataset = dataset.loc[annotation.index[sel]]
annotation = annotation.loc[annotation.index[sel]]
return dataset, annotation
def save_checkpoint(fp, model, trace, dataset_args, model_args, pymc3_args, run_id):
with open(f'{fp}', 'wb') as buff:
pickle.dump({'model': model, 'trace': trace, 'dataset_args': dataset_args,
'model_args': model_args, 'pymc3_args': pymc3_args, 'run_id': run_id}, buff)
print(f'checkpoint saved to {fp}')
def load_checkpoint(fn):
with open(fn, 'rb') as buff:
data = pickle.load(buff)
print(f'checkpoint loaded from {fn}')
wandb.init(id=data['run_id'], resume='allow')
return data['model'], data['trace'], data['dataset_args'], data['model_args'], data['pymc3_args'], data['run_id']
def load_dataset(dataset_sel, counts_fp=None, annotation_fp=None, annotation_subset=None, seed=None,
data_seed = None, sig_defs_fp=None, sim_S=None, sim_N=None, sim_I=None, sim_tau_hyperprior=None,
sim_J=None, sim_K=None, sim_alpha_bias=None, sim_psi_bias=None, sim_gamma_bias=None, sim_beta_bias=None):
# load counts, or simulated data - as specified by dataset_sel
# seed -> rng as per https://albertcthomas.github.io/good-practices-random-number-generators/
if dataset_sel == 'load_counts':
dataset = load_counts(counts_fp)
annotation = pd.read_csv(annotation_fp, index_col = 0, header = 0)
dataset, annotation = subset_samples(dataset, annotation, annotation_subset)
return dataset, annotation
elif dataset_sel == 'sim_from_sigs':
sig_defs = load_sigs(sig_defs_fp)
dataset, sim_params = sim_from_sigs(sig_defs, sim_tau_hyperprior, sim_S, sim_N, sim_I, seed)
return dataset, sim_params
elif dataset_sel == 'sim_parametric':
dataset, sim_params = sim_parametric(sim_J,sim_K,sim_S,sim_N,sim_alpha_bias,sim_psi_bias,sim_gamma_bias,sim_beta_bias,seed)
return dataset, sim_params
else:
assert False, 'dataset selection not recognized'
def load_datasets(dataset_args):
yargs = dataset_args.copy()
ca = [load_dataset(counts_fp = j[0], annotation_fp = j[1], **yargs) for j in zip(yargs.pop('counts_fp'), yargs.pop('annotation_fp'))]
counts = pd.concat([c[0] for c in ca ])
annotation = | pd.concat([a[1] for a in ca]) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 20 11:40:41 2019
@author: johnmount
"""
import numpy
import warnings
import pandas
import scipy.stats
import statistics
def can_convert_v_to_numeric(x):
"""check if non-empty vector can convert to numeric"""
try:
numpy.asarray(x + 0, dtype=float)
return True
except TypeError:
return False
def is_bad(x):
""" for numeric vector x, return logical vector of positions that are null, NaN, infinite"""
if can_convert_v_to_numeric(x):
x = numpy.asarray(x + 0, dtype=float)
return numpy.logical_or(
pandas.isnull(x), numpy.logical_or(numpy.isnan(x), numpy.isinf(x))
)
return pandas.isnull(x)
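# Example (sketch): is_bad(pandas.Series([1.0, numpy.nan, numpy.inf, None]))
# -> array([False, True, True, True]); non-numeric vectors fall back to pandas.isnull.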
def characterize_numeric(x):
"""compute na count, min,max,mean of a numeric vector"""
x = numpy.asarray(x).astype(float)
not_bad = numpy.logical_not(is_bad(x))
n_not_bad = sum(not_bad)
n = len(x)
if n_not_bad <= 0:
return {
"n": n,
"n_not_bad": n_not_bad,
"min": None,
"mean": None,
"max": None,
"varies": False,
"has_range": False,
}
x = x[not_bad]
mn = numpy.min(x)
mx = numpy.max(x)
return {
"n": n,
"n_not_bad": n_not_bad,
"min": mn,
"mean": numpy.mean(x),
"max": mx,
"varies": (mx > mn) or ((n_not_bad > 0) and (n_not_bad < n)),
"has_range": (mx > mn),
}
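# Example (sketch): characterize_numeric([1, 2, numpy.nan]) reports n=3, n_not_bad=2,
# min=1.0, mean=1.5, max=2.0, varies=True, has_range=True.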
def grouped_by_x_statistics(x, y):
"""compute some grouped by x vector summaries of numeric y vector (no missing values in y)"""
n = len(x)
if n <= 0:
raise ValueError("no rows")
if n != len(y):
raise ValueError("len(y)!=len(x)")
eps = 1.0e-3
sf = pandas.DataFrame({"x": x, "y": y})
sf.reset_index(inplace=True, drop=True)
bad_posns = pandas.isnull(sf["x"])
sf.loc[bad_posns, "x"] = "_NA_"
global_mean = sf["y"].mean()
sf["_group_mean"] = sf.groupby("x")["y"].transform("mean")
sf["_var"] = (sf["y"] - sf["_group_mean"]) ** 2
sf["_ni"] = 1
sf = sf.groupby("x").sum()
sf.reset_index(inplace=True, drop=False)
sf["y"] = sf["y"] / sf["_ni"]
sf["_group_mean"] = sf["_group_mean"] / sf["_ni"]
sf["_var"] = sf["_var"] / (sf["_ni"] - 1) + eps
avg_var = 0
bad_vars = is_bad(sf["_var"])
if sum(bad_vars) < len(sf["_var"]):
avg_var = numpy.nanmean(sf["_var"])
sf.loc[bad_vars, "_var"] = avg_var
if sf.shape[0] > 1:
sf["_vb"] = statistics.variance(sf["_group_mean"]) + eps
else:
sf["_vb"] = eps
sf["_gm"] = global_mean
# hierarchical model is in:
# http://www.win-vector.com/blog/2017/09/partial-pooling-for-lower-variance-variable-encoding/
# using naive empirical estimates of variances
# adjusted from ni to ni-1 and +eps variance to make
# rare levels look like new levels.
sf["_hest"] = (
(sf["_ni"] - 1) * sf["_group_mean"] / sf["_var"] + sf["_gm"] / sf["_vb"]
) / ((sf["_ni"] - 1) / sf["_var"] + 1 / sf["_vb"])
return sf
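# note: the '_hest' column computed above is a precision-weighted blend of each
# level's mean and the global mean, so sparse levels are shrunk toward the grand
# mean (partial pooling, as referenced in the comment above).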
def score_variables(cross_frame, variables, outcome):
"""score the linear relation of varaibles to outcomename"""
if len(variables) <= 0:
return None
n = cross_frame.shape[0]
if n != len(outcome):
raise ValueError("len(n) must equal cross_frame.shape[0]")
def f(v):
col = cross_frame[v]
col = numpy.asarray(col)
if n > 0 and numpy.max(col) > numpy.min(col):
with warnings.catch_warnings():
est = scipy.stats.pearsonr(cross_frame[v], outcome)
sfi = pandas.DataFrame(
{
"variable": [v],
"has_range": True,
"PearsonR": est[0],
"significance": est[1],
}
)
else:
sfi = pandas.DataFrame(
{
"variable": [v],
"has_range": False,
"PearsonR": numpy.NaN,
"significance": 1,
}
)
return sfi
sf = [f(v) for v in variables]
if len(sf) <= 0:
return None
sf = | pandas.concat(sf, axis=0, sort=False) | pandas.concat |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
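        # e.g. timedelta64(1, 0, 2) pairs the positional args with the 'intervals'
        # units above, giving np.timedelta64(1, 'D') + np.timedelta64(0, 'h') + np.timedelta64(2, 'm')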
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
        # ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
        # ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1.0
# division
assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
            # reversed comparisons (scalar on the left) are not handled yet:
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_nat_comparisons_scalar(self):
data = [[pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
[pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]]
for l in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
expected = Series([False, False, False])
assert_series_equal(left == pd.NaT, expected)
assert_series_equal(pd.NaT == left, expected)
expected = Series([True, True, True])
assert_series_equal(left != pd.NaT, expected)
assert_series_equal(pd.NaT != left, expected)
expected = Series([False, False, False])
assert_series_equal(left < pd.NaT, expected)
| assert_series_equal(pd.NaT > left, expected) | pandas.util.testing.assert_series_equal |
import pandas as pd
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from sklearn.cluster import KMeans, DBSCAN
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import silhouette_score, silhouette_samples
import numpy as np
import matplotlib.cm as cm
from sklearn.decomposition import PCA
import seaborn as sns
def textPreprocessing(df):
stop = stopwords.words('english')
    df['text'] = df['text'].str.replace(r'[^\w\s]', '', regex=True)  # strip punctuation
df['text'] = df['text'].apply(lambda x: " ".join(x for x in x.split() if x not in stop))
df['text'] = df['text'].apply(lambda x: " ".join(x.lower() for x in x.split()))
stemmer = PorterStemmer()
df['text'] = df['text'].apply(lambda x: " ".join([stemmer.stem(word) for word in x.split()]))
df['text'] = df['text'].apply(lambda x: " ".join(x for x in x.split() if x not in stop))
return df
def TFIDFtransformer(df):
tfidf = TfidfVectorizer(max_features=100, analyzer='word', ngram_range=(1, 3), stop_words='english')
tfidfDF = pd.DataFrame(tfidf.fit_transform(df['text']).toarray())
cols = tfidf.get_feature_names()
return tfidfDF, cols
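# note: TFIDFtransformer returns the TF-IDF matrix as a DataFrame together with the
# feature names; callers assign them as column labels (see the commented __main__
# block below: `df, cols = TFIDFtransformer(df); df.columns = cols`).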
# elbow rule plot
def optimalK_SSE(data):
sum_of_squared_distances = []
K = range(2, 11)
for k in K:
km = KMeans(n_clusters=k)
km = km.fit(data)
sum_of_squared_distances.append(km.inertia_)
plt.plot(K, sum_of_squared_distances, 'bx-')
plt.xlabel('k')
plt.ylabel('Sum_of_squared_distances')
plt.title('Elbow Method For Optimal k')
plt.show()
def optimalK_Silouhette(data):
silhouette_scores = []
K = range(2, 11)
for k in K:
km = KMeans(n_clusters=k)
cluster_labels = km.fit_predict(data)
silhouette_avg = silhouette_score(data, cluster_labels)
silhouette_scores.append(silhouette_avg)
plt.plot(K, silhouette_scores, 'bx-')
plt.xlabel('k')
plt.ylabel('Silhouette Scores')
plt.title('Silhouette For Optimal k')
plt.show()
def plotSilouhette(X):
range_n_clusters = range(2, 11)
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X.iloc[:, 0], X.iloc[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
# plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
# "with n_clusters = %d" % n_clusters),
# fontsize=14, fontweight='bold')
plt.show()
def intepretationWithVariance():
print()
if __name__ == '__main__':
# load reviews csv
# df = pd.read_csv('/home/andreas/Documents/Notebooks/TripAdvisor/reviews.csv')
# print(df.head())
# print(df.dtypes)
#
# # keep ony text
# df = df[['username', 'text']]
#
# # group by username & merge reviews text to a unified corpus
# df = df.groupby('username').agg({
# 'text': lambda x: ' '.join(x)
# }).reset_index()
#
# # Text Pre-Processing
# df = textPreprocessing(df)
#
# # Text Vectorization using TFIDF
# df, cols = TFIDFtransformer(df)
# df.columns = cols
# print(df.head())
#
# df.to_csv('tfidf.csv', index=False)
df = pd.read_csv('tfidf.csv')
# print('before outliers:', len(df))
# remove outliers
# from scipy import stats
# df = df[(np.abs(stats.zscore(df)) < 3).all(axis=1)]
# print('after outliers:', len(df))
pca = PCA(n_components=2)
pcadf = pd.DataFrame(pca.fit_transform(df))
print(pca.explained_variance_ratio_)
# df.to_csv('pcaDF.csv', index=False)
# pcadf = pd.read_csv('pcaDF.csv')
# Clustering Evaluation
optimalK_SSE(pcadf)
optimalK_Silouhette(pcadf)
# plotSilouhette(pcadf)
# dbscan = DBSCAN(eps=1, min_samples=2).fit(df)
# print('DBSCAN: {}'.format(silhouette_score(df, dbscan.labels_,
# metric='cosine')))
km = KMeans(n_clusters=3).fit(pcadf)
# cluster_labels = km.fit_predict(df)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
df_scaled = pd.DataFrame(scaler.fit_transform(df))
df_scaled['cluster_id'] = km.labels_
df_mean = df_scaled.groupby('cluster_id').mean()
# df_mean.columns = df.columns
results = pd.DataFrame(columns=['Variable', 'Var'])
for column in df_mean.columns:
print(column)
results.loc[len(results), :] = [column, np.var(df_mean[column])]
selected_columns = list(results.sort_values(
'Var', ascending=False,
).head(15).Variable.values) + ['cluster_id']
tidy = df_scaled[selected_columns].melt(id_vars='cluster_id')
tidy['variable'] = tidy['variable'].apply(lambda x: df.columns[x])
# clrs = ['grey' if (x < max(tidy['value'])) else 'red' for x in tidy['value']]
sns.barplot(x='cluster_id', y='value', hue='variable', data=tidy)
plt.legend(bbox_to_anchor=(1.01, 1),
borderaxespad=0)
plt.title('Interpretation with feature variance')
plt.show()
# for i in selected_columns:
# print(str(i) + ': ' + str(df.columns[i]))
from sklearn.ensemble import RandomForestClassifier
X, y = df_scaled.iloc[:, :-1], df_scaled.iloc[:, -1]
clf = RandomForestClassifier(n_estimators=100).fit(X, y)
data = np.array([clf.feature_importances_, X.columns]).T
columns = list( | pd.DataFrame(data, columns=['Importance', 'Feature']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
load_monkey : load_monkey(mCode)
    returns: *trials, dates*
get_short : get_short(trials)
del_errorType : del_errorTrials(trials, errorType = 'all')
by_sequenceType : by_sequenceType(trials, sequenceType)
by_sequenceMode : by_sequenceMode(trials, sequenceMode)
by_date : by_date(df_full, date, sequence_mode = 'all', sequence_type = 'all', filter_errors = True)
by_range :
by_gamble : by_gamble(df_full)
"""
import pandas as pd
import numpy as np
import scipy.optimize as opt
from macaque.f_toolbox import *
from macaque.f_choices import get_psychData, get_options
from scipy.stats.distributions import t
import seaborn as sb
import matplotlib.pyplot as plt
plt.style.use('seaborn-paper')
plt.rcParams['svg.fonttype'] = 'none'
tqdm = ipynb_tqdm()
#%%
def get_softmaxData(choiceData,
metricType='ce',
minSecondaries=4,
minChoices=4,
plotTQDM=True):
'''
From a 'choiceData' dataFrame, retrieve psychometric data used in certainty/probability equivalents, choice ratios, reaction times. \n
**IMPORTANT**:\n
If 'choiceData' is *divided into blocked or sequence-specific choices*, get_softmaxData returns block or sequence specific results (per day of testing).
Parameters
----------
choiceData : DataFrame
DataFrame of psychophysics data i.e. CE or PE sequences
metricType : string
'CE' / 'certainty equivalent' or 'PE'/'probability equivalent' psychometric fits on the choice Data, \
'Trans' orders without computing psychometrics
minSecondaries : int
Number of secondary options against which the primary is tested (e.g. safes for a single gamble)
minChoices : int
        Number of choices made between the primary and secondary options (e.g. safes repeated n times per gamble)
trials : None or DataFrame
Dataframe from which original trials can be used to merge similar blocks that come one after another *(only useful for blocked choice data)*
Returns
----------
softmaxDF : DataFrame
        Returns psychometric data used to plot softmax curves, reaction times, and choice ratios between gamble/safe pairs and sequences
----------
future: needs to print proper Confidence Intervals
'''
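    # Two code paths below: when the choice data carries a 'division' label across
    # several session dates, the function calls itself per (sessionDate, division)
    # pair and concatenates the results; otherwise the psychometric summary is
    # computed per unique primary option.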
    # This is in case the data has been divided into blocks/sequence types
# (useful for further analysis)
if ('division' in choiceData.columns) and (len(
choiceData.sessionDate.unique()) > 1):
dfs = []
for day in tqdm(
choiceData.sessionDate.unique(),
desc='Computing block-based Psychophysics',
disable=not plotTQDM):
for div in choiceData.loc[choiceData.sessionDate ==
day].division.unique():
tailEnd = get_softmaxData(
(choiceData.loc[choiceData.sessionDate == day]
.loc[choiceData.division == div]), metricType,
minSecondaries, minChoices)
if tailEnd is None:
continue
else:
dfs.append(
tailEnd.assign(division=div).assign(sessionDate=day))
softmaxDF = pd.concat(dfs, ignore_index=True)
if metricType.lower() == 'ce' or metricType.lower() == 'certainty equivalent':
cols = [
'sessionDate', 'primary', 'primaryEV', 'equivalent',
'secondary', 'secondaryEV', 'm_range', 'freq_sCh', 'pFit',
'pSTE', 'no_of_Trials', 'nTrials', 'primarySide', 'choiceList',
'filteredRT', 'choiceTimes', 'moveTime', 'trial_index',
'oClock', 'func', 'metricType', 'division', 'seqCode', 'gList',
'chosenEV'
]
elif metricType.lower() == 'pe' or metricType.lower() == 'probability equivalent':
cols = [
'sessionDate', 'primary', 'primaryEV', 'equivalent', 'freq_sCh',
'secondary', 'secondaryEV', 'm_range', 'pFit', 'pSTE',
'no_of_Trials', 'nTrials', 'primarySide', 'choiceList',
'filteredRT', 'choiceTimes', 'moveTime', 'trial_index',
'oClock', 'func', 'metricType', 'division', 'seqCode', 'gList',
'chosenEV'
]
elif metricType.lower() == 'none':
cols = [
'sessionDate', 'primary', 'primaryEV', 'secondary',
'secondaryEV', 'm_range', 'freq_sCh', 'no_of_Trials', 'nTrials',
'primarySide', 'choiceList', 'filteredRT', 'choiceTimes',
'moveTime', 'trial_index', 'oClock', 'metricType', 'division',
'seqCode', 'gList', 'chosenEV'
]
else:
cols = [
'sessionDate', 'primary', 'primaryEV', 'secondary',
'secondaryEV', 'm_range', 'freq_sCh', 'no_of_Trials', 'nTrials',
'primarySide', 'choiceList', 'filteredRT', 'choiceTimes',
'moveTime', 'trial_index', 'oClock', 'metricType', 'division',
'seqCode', 'gList', 'chosenEV'
]
return psychometricDF(softmaxDF[cols])
#-------------------------------------------------------------------------
else:
cols = [
'primary', 'primaryEV', 'secondary', 'secondaryEV', 'm_range',
'freq_sCh', 'primarySide', 'no_of_Trials', 'nTrials', 'choiceList',
'filteredRT', 'choiceTimes', 'moveTime', 'trial_index', 'oClock',
'metricType', 'chosenEV'
]
# softmaxDF = pd.DataFrame(columns=cols)
dfs = []
psychData = get_psychData(choiceData, metricType, transitType='None')
unique_options = unique_listOfLists(psychData.option1)
for option in unique_options:
# find index for specific option1 gamble
index = psychData['option1'].apply(lambda x: x == option)
mags = []
igg = {}
trialType = []
# here we define different secondary gambles from their magnitudes
# LOOK HERE FOR THE ISSUE OF != 2
if psychData.loc[index].loc[psychData.loc[index].option2.apply(
lambda x: len(x)) != 2].option2.values.tolist() != []:
gg = psychData.loc[index].loc[
psychData.loc[index].option2.apply(lambda x: len(x)) !=
2].option2.apply(lambda x: [x[0], x[2]])
mags, igg = unique_listOfLists(gg, returnIndex=True)
for nn in mags:
igg[tuple(nn)] = gg.iloc[igg[tuple(nn)]].index
trialType = mags[:]
# here we define safe secondary options as unique
if psychData.loc[index].loc[psychData.loc[index].option2.apply(
lambda x: len(x)) == 2].index.tolist() != []:
listy = psychData.loc[index].loc[psychData.loc[
index].option2.apply(lambda x: len(x)) == 2].option2.apply(
lambda x: x[0])
mags.append([min(listy), max(listy)]) # add the safes to this
igg[tuple([
min(listy), max(listy)
])] = psychData.loc[index].loc[psychData.loc[
index].option2.apply(lambda x: len(x)) == 2].index.tolist()
trialType.append(['safe'])
for m_range, tt in zip(mags, trialType):
# make series of trial numbers for minChoices filter
choiceRepeats = psychData.loc[igg[tuple(
m_range)]].no_of_Trials.values.tolist()
if len([lens for lens in choiceRepeats if lens >= minChoices
]) >= minSecondaries:
# condition to evaluate the options
# import pdb ; pdb.set_trace() #AWESOME WAY TO DEBUG
subDf = psychData.loc[igg[tuple(m_range)]].loc[
psychData.loc[igg[tuple(m_range)]].no_of_Trials >=
minChoices].sort_values('option2')
if np.size(subDf) == 0:
continue
if tt != ['safe']:
# look at the magnitude in option 2 fields
marker = [m[-1] for m in subDf.option2]
else:
marker = [m[0] for m in subDf.option2]
try:
seq = int(subDf.seqType.unique()[0])
gList = subDf.gList.unique()[0]
except BaseException:
seq = []
gList = []
dfs.append(
pd.DataFrame({
'primary': [
flatten(
unique_listOfLists(
subDf.option1.values.tolist()))
],
'primaryEV':
np.unique(subDf.G1_ev.values.tolist()).tolist(),
'secondary': [subDf.option2.values.tolist()],
'secondaryEV':
[np.unique(subDf.G2_ev.values.tolist()).tolist()],
'm_range': [m_range],
'freq_sCh': [(subDf.chose2 /
subDf.no_of_Trials).values.tolist()],
'no_of_Trials':
[subDf.no_of_Trials.values.tolist()],
'nTrials':
[sum(subDf.no_of_Trials.values.tolist())],
'choiceList': [{
key: value for key, value in zip(
marker, subDf.choiceList.values.tolist())
}],
'choiceTimes': [{
key: value for key, value in zip(
marker, subDf.choiceTimes.values.tolist())
}],
'filteredRT': [{
key: value for key, value in zip(
marker, subDf.filteredRT.values.tolist())
}],
'moveTime': [{
key: value for key, value in zip(
marker, subDf.moveTime.values.tolist())
}],
'trial_index': [{
key: value for key, value in zip(
marker, subDf.trial_index.values.tolist())
}],
'oClock': [{
key: value for key, value in zip(
marker, subDf.oClock.values.tolist())
}],
'primarySide': [{
key: value for key, value in zip(
marker, subDf.side_of_1.values.tolist())
}],
'metricType': [metricType.upper()],
'seqCode': [seq],
'gList': [gList],
'chosenEV': [{
key: value for key, value in zip(
marker, subDf.chosenEV.values.tolist())
}]
}))
if dfs == []:
softmaxDF = pd.DataFrame(columns=cols)
else:
softmaxDF = pd.concat(dfs, ignore_index=True)
if softmaxDF.empty:
return None
if metricType.lower() == 'ce' or metricType.lower(
) == 'certainty equivalent' or metricType.lower(
) == 'pe' or metricType.lower() == 'probability equivalent':
cols = [
'primary', 'primaryEV', 'equivalent', 'secondary',
'secondaryEV', 'm_range', 'freq_sCh', 'pFit', 'pSTE',
'primarySide', 'no_of_Trials', 'nTrials', 'choiceList',
'filteredRT', 'choiceTimes', 'moveTime', 'trial_index',
'oClock', 'func', 'metricType', 'seqCode', 'gList', 'chosenEV'
]
softmaxDF = fit_softmax(softmaxDF, metricType)
elif metricType.lower() == 'trans' or metricType.lower(
) == 'transitivity':
cols = [
'primary', 'primaryEV', 'secondary', 'secondaryEV', 'm_range',
'freq_sCh', 'primarySide', 'no_of_Trials', 'nTrials',
'choiceList', 'filteredRT', 'choiceTimes', 'moveTime',
'trial_index', 'oClock', 'metricType', 'seqCode', 'gList',
'chosenEV'
]
# import pdb ; pdb.set_trace() #AWESOME WAY TO DEBUG
return psychometricDF(softmaxDF[cols])
#%%
def fit_softmax(softmaxDF, metricType='CE'):
'''
Fit a logistic choice function to each row of softmaxDF and append the fitted
parameters (pFit), their standard errors (pSTE), the fitted function, and the
resulting equivalent (indifference point) as new columns.
'''
np.warnings.filterwarnings('ignore')
dList = []
# sigmoid = lambda x, p1, p2: 1/(1+ np.exp( -(1/p2) * (x - p1) ) )
# #logistic sigmoid function
# logistic sigmoid function (SAME AS ABOVE)
def sigmoid(x, p1, p2):
return np.array(1 / (1 + np.exp(-(x - p1) / p2)))
#softmax = lambda x, p1, p2: np.exp( -(x - p1)/p2 ) / np.exp( -(x - p1)/p2 ) + np.exp( -(x - p1)/p2 )
for i, situation in (softmaxDF.iterrows()):
if metricType.lower() == 'ce':
x_mag = [m[0] for m in situation.secondary]
# primary = situation.primary[::2]
chX_freq = situation.freq_sCh # need the frequency of picking safe, not gamble
elif metricType.lower() == 'pe':
x_mag = [g[3] for g in situation.secondary]
# primary = flatten([g[::2] for g in situation.secondary])
chX_freq = situation.freq_sCh # need the frequency of picking safe, not gamble
# define p0 as dynamic points of entry
p0 = [max(x_mag) / 2, 0.015]
try: # get the modelling done, but have a catch if it doesn't work
# param_bounds=([min(primary), -np.inf],[max(primary), np.inf])
pFit, pCov = opt.curve_fit(
sigmoid, x_mag, chX_freq, p0=p0,
method='trf') # , bounds=param_bounds)
# standard error on parameters
pSTE = np.sqrt(np.diag(pCov).tolist())
dList.append({
'pFit': pFit.tolist(),
'pSTE': pSTE.tolist(),
'func': sigmoid,
'equivalent': pFit[0]
})
except RuntimeError:
dList.append({
'pFit': np.nan,
'pSTE': np.nan,
'func': sigmoid,
'equivalent': np.nan
})
return pd.concat(
[softmaxDF, pd.DataFrame(dList)], axis=1, join_axes=[softmaxDF.index])
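# Minimal sketch of the fit performed above (illustrative numbers, not the author's data):
# the fitted p1 is the indifference point where the sigmoid crosses 0.5 (reported as
# 'equivalent'), and p2 controls the slope of the choice curve.
# import scipy.optimize as opt
# x_mag = [0.1, 0.2, 0.3, 0.4, 0.5]        # safe magnitudes offered
# chX_freq = [0.0, 0.1, 0.5, 0.9, 1.0]     # frequency of choosing the safe option
# pFit, pCov = opt.curve_fit(lambda x, p1, p2: 1 / (1 + np.exp(-(x - p1) / p2)),
#                            x_mag, chX_freq, p0=[0.25, 0.015], method='trf')
# pFit[0]   # ~0.3, the certainty equivalent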
#%%
def plot_softmax(softmaxDF,
sortBy='primaryEV',
printRatios=True,
plot_ci='fit',
color=None):
'''
From a softmax dataFrame, plot the softmax curves either individually or all at once.
Parameters
----------
softmaxDF : DataFrame
DataFrame of psychophysics data i.e. CE or PE sequences
sortBy : string
Column used to sort the curves before plotting (default 'primaryEV')
printRatios : bool
If True, annotate each data point with its chosen/total trial counts
plot_ci : string
Confidence-interval method to shade: 'fit', 'residuals', or 'resampling'
color : list or None
Optional list of colours, one per plotted curve
Returns
----------
Plots sigmoid (softmax) curves fitted to the animals' choice behaviour
----------
future: needs to print proper Confidence Intervals
'''
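# Usage sketch (commented out; assumes 'softmaxDF' was produced by get_softmaxData with
# metricType 'CE' or 'PE', so the 'pFit'/'equivalent' columns exist):
# plot_softmax(softmaxDF, sortBy='primaryEV', printRatios=True, plot_ci='fit')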
from operator import add
from operator import sub
import matplotlib.cm as cm
def get_range(sm):
softmaxType = np.unique(sm['metricType']) # plotting the model
if softmaxType.item().lower() == 'ce':
primary = np.concatenate([x[::2] for x in sm['primary']])
return min(primary), max(primary)
elif softmaxType.item().lower() == 'pe':
return 0, 1
else:
raise NameError('No psychometrics possible')
def get_XYpoints(row):
if row.metricType.lower() == 'ce':
x_mag = [m[0] for m in row.secondary]
chX_freq = row.freq_sCh # need the frequency of picking safe, not gamble
EV = row.primaryEV
elif row.metricType.lower() == 'pe':
x_mag = [g[3] for g in row.secondary]
chX_freq = row.freq_sCh # need the frequency of picking safe, not gamble
EV = row.secondary
return x_mag, chX_freq, EV
def smSubplots(sm, printRatios=printRatios, plot_ci=plot_ci, color=None):
rows = np.ceil(len(sm) / 9)
if len(sm) < 5:
fig, ax = plt.subplots(
int(rows), len(sm), squeeze=False, figsize=(10, int(rows * 4)))
maxC = len(sm) - 1
else:
fig, ax = plt.subplots(
int(rows), 9, squeeze=False, figsize=(15, int(rows * 2)))
maxC = 8
minX, maxX = get_range(sm)
xx = np.linspace(minX, maxX, 100)
c = 0
r = 0
# --------------------------------------
for ind, row in sm.iterrows():
x_mag, chX_freq, EV = get_XYpoints(row)
func = row.func
if color:
col = color[c]
else:
col = cm.rainbow(row.primaryEV * 2)
ax[r, c].set_title(str(row['primary']))
# plot points of selection
ax[r, c].plot(x_mag, chX_freq, 'bo ', color=col)
# plots a line at the expected value
ax[r, c].axvline(x=EV, linestyle='--', color='k', alpha=0.7)
if printRatios:
for m, p, nn in zip(x_mag, chX_freq, row['no_of_Trials']):
ax[r, c].text(
m + 0.015,
p + 0.015,
str(int(p * nn)) + '/' + str(int(nn)),
style='italic',
color='k',
alpha=0.65)
ax[r, c].grid(b=True, which='major', axis='y')
if not np.isnan(row['equivalent']):
ax[r, c].plot(xx, func(xx, *row['pFit']), color=col)
if row['primaryEV'] > np.mean((minX, maxX)):
ax[r, c].text(minX + 0.02, 1.0,
'CE=' + str(round(row['pFit'][0], 2)))
else:
ax[r, c].text(row['pFit'][0] + 0.02, 0.05,
'CE=' + str(round(row['pFit'][0], 2)))
if row['pFit'][0] > EV:
ax[r, c].axvline(x=row['pFit'][0], linestyle='-', color='g')
elif row['pFit'][0] < EV:
ax[r, c].axvline(x=row['pFit'][0], linestyle='-', color='r')
elif row['pFit'][0] == EV:
ax[r, c].axvline(x=row['pFit'][0], linestyle='-', color='k')
ax[r, c].set_xlim(minX - 0.05, maxX + 0.05) # x axis length
ax[r, c].set_ylim(-0.1, 1.2) # y axis length
if plot_ci.lower() == 'residuals':
bound_upper, bound_lower = softmax_CI(row, xx, method=plot_ci)
elif plot_ci.lower() == 'resampling':
bound_upper, bound_lower = softmax_CI(row, xx, method=plot_ci)
elif plot_ci.lower() == 'fit':
bound_upper = func(xx, *map(add, row['pFit'], row['pSTE']))
bound_lower = func(xx, *map(sub, row['pFit'], row['pSTE']))
ax[r, c].fill_between(
xx, bound_lower, bound_upper, color=col, alpha=0.2)
c += 1
if c > maxC:
c = 0
r += 1
#--------------------------------------------------------------
if c < maxC and r == rows - 1:
while c <= maxC:
fig.delaxes(ax[r, c])
c += 1
# plt.tight_layout(rect=[0, 0.03, 1, 0.95])
for axis in ax.reshape(-1):
x0, x1 = axis.get_xlim()
y0, y1 = axis.get_ylim()
axis.set_aspect((x1 - x0) / (y1 - y0))
if 'sessionDate' in sm.columns:
plt.suptitle(str(sm['sessionDate'].unique()[0]))
plt.show()
# -----------------------------------------------
if isinstance(softmaxDF, pd.core.series.Series):
softmaxDF = softmaxDF.to_frame().transpose()
if 'sessionDate' in softmaxDF.columns:
for date in softmaxDF['sessionDate'].unique():
sm = softmaxDF.loc[softmaxDF['sessionDate'] == date]
sm.sort_values([sortBy], inplace=True)
smSubplots(
sm, printRatios=printRatios, plot_ci=plot_ci, color=color)
else:
softmaxDF.sort_values([sortBy], inplace=True)
smSubplots(
softmaxDF, printRatios=printRatios, plot_ci=plot_ci, color=color)
return
#%%
def softmax_CI(softmaxDF, xx, method='resampling', n=1000):
import numpy as np
import scipy.optimize as opt
#------------------------------
def get_choiceRatio(data):
ch = []
chX_freq = []
x_mag = []
for dd in data:
if dd[0] not in x_mag and ch == []:
x_mag.extend(dd[0])
elif dd[0] not in x_mag and ch != []:
x_mag.extend(dd[0])
chX_freq.extend([sum(ch) / len(ch)])
ch = []
ch.extend(dd[1])
chX_freq.extend([sum(ch) / len(ch)])
return x_mag, chX_freq
#-------------------------------
safes = []
chS = []
safes.extend(
np.concatenate([
np.repeat(item, len(values))
for item, values in softmaxDF.choiceList.items()
]))
chS.extend(
np.concatenate(
[values for item, values in softmaxDF.choiceList.items()]) - 1)
data = np.array(np.split(np.array([safes, chS]), len(chS), axis=1))
x_mag, chX_freq = get_choiceRatio(data)
p0 = [max(x_mag) / 2, 0.015]
# logistic sigmoid function (SAME AS ABOVE)
def sigmoid(x, p1, p2):
return np.array(1 / (1 + np.exp(-(x - p1) / p2)))
# this give same result as matlab softmax
pFit_1, pCov = opt.curve_fit(sigmoid, x_mag, chX_freq, p0=p0, method='trf')
resid = sigmoid(x_mag, pFit_1[0], pFit_1[1]) - chX_freq
yHat = sigmoid(x_mag, pFit_1[0], pFit_1[1])
# xx = np.linspace(0, 1, 100)
b1 = []
#b1.append(sigmoid(xx, pFit_1[0], pFit_1[1]))
for i in np.arange(1, n):
if method.lower() == 'residuals':
residBoot = np.random.permutation(resid)
yBoot = yHat + residBoot
# this give same result as matlab softmax
pFit, pCov = opt.curve_fit(
sigmoid, x_mag, yBoot, p0=p0, method='trf')
elif method.lower() == 'resampling':
xb = np.random.choice(range(len(data)), len(data), replace=True)
bootSample = np.hstack(data[xb])
bootSample = bootSample[:, np.argsort(bootSample[0])]
bootSample = np.array(np.split(bootSample, len(data), axis=1))
bootx, booty = get_choiceRatio(bootSample)
try:
# this give same result as matlab softmax
pFit, pCov = opt.curve_fit(
sigmoid, bootx, booty, p0=p0, method='trf')
except BaseException:
continue
if pFit[1] < 0.002:
def sigmoid(x, p1):
return np.array(1 / (1 + np.exp(-(x - p1) / 0.002)))
# this give same result as matlab softmax
pFit, pCov = opt.curve_fit(
sigmoid, bootx, booty, p0=p0[0], method='trf')
pFit = [pFit, 0.002]
# logistic sigmoid function (SAME AS ABOVE)
def sigmoid(x, p1, p2):
return np.array(1 / (1 + np.exp(-(x - p1) / p2)))
b1.append(sigmoid(xx, pFit[0], pFit[1]))
b1 = np.vstack(b1)
lower, upper = np.percentile(b1, [5, 95], axis=0)  # 5th percentile is the lower bound, 95th the upper
return upper, lower
#%%
def plot_transitivity(softmaxDF):
'''
From a softmax dataFrame, plot horizontal bars of choice ratios to visualise the transitivity of preferences between option pairs.
Parameters
----------
softmaxDF : DataFrame
DataFrame of psychophysics data i.e. CE or PE sequences
Returns
----------
Plots choice ratios (relative to 0.5) for every primary/secondary pairing, split per session and sequence type
'''
if softmaxDF.empty:
return
import numpy as np
from macaque.f_toolbox import flatten
# -------------------------------------------- where primary function starts
if ('sessionDate' in softmaxDF.columns) and (len(
softmaxDF.sessionDate.unique()) > 1):
for day in softmaxDF.sessionDate.unique():
for div in softmaxDF.seqCode.unique():
# .sort_values(['primaryEV']))
plot_transitivity(softmaxDF.loc[softmaxDF.sessionDate == day]
.loc[softmaxDF.seqCode == div])
else:
# if there is a date to the softmax row, add the date to the subplot
i = 0
ratios = []
indice = []
leftAxis = []
rightAxis = []
lookup = []
for index, row in softmaxDF.iterrows():
# np.sort(row.secondary)
leftAxis.extend(
np.repeat(str(row.primary), len(row.freq_sCh), axis=0).tolist())
rightAxis.extend(row.secondary)
for choice_ratio in row.freq_sCh:
ratios.extend([choice_ratio - 0.5])
indice.extend([i])
i += 1
lookup.extend([i])
colors = []
for ii, ration in enumerate(ratios):
if ration > 0:
colors.extend('g')
elif ration < 0:
colors.extend('r')
else:
colors.extend('k')
fig, axarr = plt.subplots(
figsize=(8, len(flatten(softmaxDF.freq_sCh.tolist())) / 4))
if 'sessionDate' in softmaxDF.columns:
axarr.set_title(
softmaxDF.sessionDate.apply(lambda x: x.strftime("%Y-%m-%d"))
.unique().tolist()[0] + ': division ' + str(
softmaxDF.seqCode.unique().tolist()[0])
) # this sets the subplot's title
axarr.barh(indice, ratios, color=colors)
axarr.axvline(x=0, linestyle='-', color='k', alpha=1)
axarr.axvline(x=0.25, linestyle='--', color='k', alpha=0.6)
axarr.axvline(x=-0.25, linestyle='--', color='k', alpha=0.6)
plt.yticks(indice, leftAxis)
axarr.set_ylim(min(indice) - 1, max(indice) + 2) # y axis length
plt.tight_layout()
axarr2 = axarr.twinx()
axarr2.barh(indice, ratios, alpha=0)
for ii, chR, nT in zip(indice, flatten(softmaxDF.freq_sCh.tolist()),
flatten(softmaxDF.no_of_Trials.tolist())):
if chR > 0.5:
axarr2.text(
chR - 0.5 + 0.015,
ii - 0.25,
str(int(chR * nT)) + '/' + str(int(nT)),
style='italic',
color='k',
alpha=0.65,
fontsize='smaller')
else:
axarr2.text(
chR - 0.5 - 0.08,
ii - 0.25,
str(int(chR * nT)) + '/' + str(int(nT)),
style='italic',
color='k',
alpha=0.65,
fontsize='smaller')
for lines in lookup:
axarr2.axhline(y=lines - 0.5, linestyle='-', color='b', alpha=1)
plt.yticks(indice, rightAxis)
axarr2.set_ylim(min(indice) - 1, max(indice) + 2) # y axis length
axarr2.set_xlim(-0.6, 0.6) # y axis length
plt.tight_layout()
plt.show()
#%%
def plot_reactionTime(softmaxDF):
'''
CHANGE THIS BECAUSE IT DOES NOT PRINT PROPER CONFIDENCE INTERVAL
'''
import scipy.stats as stats
import numpy as np
if ('sessionDate' in softmaxDF.columns) and (len(
softmaxDF.sessionDate.unique()) > 1):
for day in softmaxDF.sessionDate.unique():
for div in softmaxDF.seqCode.unique():
# .sort_values(['primaryEV']))
plot_reactionTime(softmaxDF.loc[softmaxDF.sessionDate == day]
.loc[softmaxDF.seqCode == div])
else:
if isinstance(softmaxDF, pd.core.series.Series):
plot_Times(softmaxDF)
else:
softmaxDF.sort_values(['primaryEV'], inplace=True)
fig, axarr = plt.subplots(
len(softmaxDF),
1,
squeeze=False,
figsize=(8,
len(softmaxDF) * 2.3)) # this is the subplot command
ii = 0
if ('sessionDate' in softmaxDF.columns):
plt.suptitle(
softmaxDF.sessionDate.apply(
lambda x: x.strftime("%Y-%m-%d")).unique().tolist()[0] +
': division ' + str(softmaxDF.seqCode.unique().tolist()[0]),
fontsize=16)
for index, row in softmaxDF.iterrows():
plot_Times(row, subPlace=axarr, subPlot=ii)
ii += 1
# plt.tight_layout()
plt.show()
#%%
def plot_Times(situation, subPlace=None, subPlot=0):
import scipy.stats as stats
import numpy as np
plt.rc('axes', axisbelow=True)
choiceDict = situation.choiceTimes # plotting the model
moveDict = situation.moveTime # plotting the model
if np.unique([len(x) for x in situation.secondary]) == 2:
x_mag = [m[0] for m in situation.secondary]
EV = situation.primaryEV
elif np.unique([len(x) for x in situation.secondary]) == 4:
x_mag = [g[3] for g in situation.secondary]
EV = situation.primaryEV
steCH = []
avgCH = []
steMV = []
avgMV = []
for x in x_mag:
chTimes = choiceDict[x]
steCH.append(stats.sem(np.array(chTimes)))
avgCH.append(np.mean(chTimes))
mvTimes = moveDict[x]
steMV.append(stats.sem(np.array(mvTimes)))
avgMV.append(np.mean(mvTimes))
# import pdb ; pdb.set_trace() #AWESOME WAY TO DEBUG
#fig, axarr = plt.subplots(figsize=(8, len(flatten(softmaxDF.freq_sCh.tolist()))/4))
subPlace[subPlot, 0].grid(b=True, which='major', axis='y')
subPlace[subPlot, 0].bar(
x_mag, avgCH, width=0.035, yerr=steCH, color='k', alpha=1, capsize=2)
subPlace[subPlot, 0].bar(
x_mag, avgMV, width=0.035, yerr=steMV, color='b', alpha=1, capsize=2)
subPlace[subPlot, 0].axvline(x=EV, linestyle='--', color='k', alpha=0.7)
plt.xlabel('safe magnitude')
plt.ylabel('reactionTime')
for x in x_mag:
subPlace[subPlot, 0].plot(
np.linspace(x, x, len(choiceDict[x])),
choiceDict[x],
'.',
color='k',
alpha=0.3)
subPlace[subPlot, 0].plot(
np.linspace(x, x, len(moveDict[x])),
moveDict[x],
'.',
color='k',
alpha=0.7)
subPlace[subPlot, 0].text(
max(x_mag) + (x_mag[-1] - x_mag[-2]), 0.25, str(situation.primary))
#%%
def plot_equivalents(softmaxDF, withFit=False):
from collections import Counter
import numpy as np
import pandas as pd
if ('sessionDate' in softmaxDF.columns) and (len(
softmaxDF.sessionDate.unique()) > 1):
for day in softmaxDF.sessionDate.unique():
# .sort_values(['primaryEV']))
plot_equivalents(softmaxDF.loc[softmaxDF.sessionDate == day])
else:
plt.rc('axes', axisbelow=True)
fig, axarr = plt.subplots(
1, len(softmaxDF.seqCode.unique()), squeeze=False, figsize=(8, 4))
for n, div in enumerate(softmaxDF.seqCode.unique()):
softmaxDF.loc[softmaxDF.seqCode == div].sort_values(
['primaryEV'], inplace=True)
mGroup = Counter(softmaxDF.loc[softmaxDF.seqCode == div]
.primary.apply(lambda x: x[2]))
# find the most common magnitude for the plotting
mGroup = mGroup.most_common(1)[0][0]
selectedDF = softmaxDF.loc[softmaxDF.seqCode == div].loc[
softmaxDF.primary.apply(lambda x: x[2]) == mGroup]
p = selectedDF.primary.apply(lambda x: x[3])
m = selectedDF.primary.apply(lambda x: x[2])
CE = selectedDF.equivalent
if ('sessionDate' in softmaxDF.loc[softmaxDF.seqCode == div]
.columns):
plt.suptitle(
softmaxDF.loc[softmaxDF.seqCode == div].sessionDate.apply(
lambda x: x.strftime("%Y-%m-%d")).unique().tolist()[0],
fontsize=16)
axarr[0, n].scatter(p, CE, color='b', alpha=0.7)
axarr[0, n].plot(
np.linspace(0, 1, 1000),
np.linspace(0, 0.5, 1000),
color='k',
linestyle='--')
# this sets the subplot's title
axarr[0, n].set_title(str(np.unique(selectedDF.division.tolist())))
# axarr.set_ylim(0, mGroup) #y axis length
# axarr.set_xlim(0, 1) #y axis length
# axarr.axis('scaled')
axarr[0, n].set_adjustable('datalim')
axarr[0, n].grid(b=True, which='major')
x0, x1 = axarr[0, n].get_xlim()
y0, y1 = axarr[0, n].get_ylim()
axarr[0, n].set_aspect((x1 - x0) / (y1 - y0))
#%%
def expand_softmax(softmaxDF):
'''
Expands a softmax dataframe so that each secondary option has its own row - as opposed to aggregating them
'''
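# Sketch of the transformation (illustrative shapes, not real data): a row whose
# 'secondary' holds e.g. 3 safe options becomes 3 rows, one per secondary option,
# with the per-option entries of freq_sCh, no_of_Trials, choiceList, etc. pulled out.
# expanded = expand_softmax(softmaxDF)
# len(expanded) >= len(softmaxDF)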
if len(softmaxDF.loc[softmaxDF.secondary.apply(lambda x: len(x) > 1)]) == 0:
return softmaxDF # in case the softmaxDF is already in an expanded form
if 'sessionDate' not in softmaxDF:
softmaxDF['sessionDate'] = 0
softmaxDF['division'] = 0
dfs = []
count = 0
for date in tqdm(softmaxDF['sessionDate'].unique(), desc='Expanding DF'):
dateSM = softmaxDF.loc[softmaxDF['sessionDate'] == date]
miniDF = []
for _, row in dateSM.loc[dateSM.secondary.apply(
lambda x: len(x) > 1)].iterrows():
for i, secondary in enumerate(row.secondary):
count += 1
new_row = row.copy()
new_row['secondary'] = new_row.secondary[i]
new_row['secondaryEV'] = new_row.secondaryEV[i]
new_row['freq_sCh'] = new_row.freq_sCh[i]
new_row['no_of_Trials'] = new_row.no_of_Trials[i]
new_row['nTrials'] = new_row.nTrials
new_row['primarySide'] = {secondary[0]: new_row.primarySide[secondary[0]]}
new_row['choiceList'] = {secondary[0]: new_row.choiceList[secondary[0]]}
new_row['moveTime'] = {secondary[0]: new_row.moveTime[secondary[0]]}
new_row['choiceTimes'] = {secondary[0]: new_row.choiceTimes[secondary[0]]}
new_row['filteredRT'] = {secondary[0]: new_row.filteredRT[secondary[0]]}
new_row['trial_index'] = {secondary[0]: new_row.trial_index[secondary[0]]}
new_row['oClock'] = {secondary[0]: new_row.oClock[secondary[0]]}
miniDF.append(new_row)
dfs.append(pd.DataFrame(miniDF))
dfs.append(softmaxDF.loc[softmaxDF.secondary.apply(lambda x: len(x) == 1)])
softmaxDF = | pd.concat(dfs, ignore_index=True) | pandas.concat |
import sklearn
import sklearn.datasets
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from geolang.src.classifiers.bunch_data import bunch_training
def get_shape(arr):
'''
Return shape of an array
'''
array = np.array(arr)
return array.shape
def count_vectorize(training):
'''
Vectorizes the training text data.
Inputs:
training -- a Bunch object, utilize bunch_training method in bunch_data.py
Outputs:
X_train -- Numpy array of vectorized training data
count_vect -- count vectorizer object
tf_transformer -- transformer object
'''
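# A minimal sketch of the bag-of-words + tf-idf pipeline this function wraps
# (assumed implementation - only the first line of the body survives in the source):
# count_vect = CountVectorizer()
# X_counts = count_vect.fit_transform(training_df[0])
# tf_transformer = TfidfTransformer(use_idf=False).fit(X_counts)
# X_train = tf_transformer.transform(X_counts)
# return X_train, count_vect, tf_transformer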
training_df = | pd.DataFrame(training.data) | pandas.DataFrame |
from django.shortcuts import render, redirect
from django.contrib import messages
from sqlalchemy import inspect
import sqlalchemy
import pandas as pd
import ast
import numpy as np
from sqlalchemy.sql import exists
import xgboost as xgb
import plotly.express as px
import plotly.io as pio
import plotly.graph_objs as po
import plotly
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import MinMaxScaler
# function for processing the "lessons" dataframe
def lessons_process(lessons):
# turn strdict into dict, assign to same column variable
lessons['resources'] = [ast.literal_eval(x) for x in lessons['resources']]
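# ast.literal_eval safely parses the string-encoded list of dicts, e.g. (illustrative values):
# ast.literal_eval("[{'content_id': 'abc', 'channel_id': 'ch1', 'contentnode_id': 'n1'}]")
# -> [{'content_id': 'abc', 'channel_id': 'ch1', 'contentnode_id': 'n1'}]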
dataframes = []
lesson_id_iter = iter(list(lessons['id']))
for row in lessons['resources']:
row_dict = {'content_id': [], 'channel_id': [], 'contentnode_id': [], }
try:
for diction in row:
keys = diction.keys()
for key in keys:
row_dict[key].append(diction[key])
dataframe = pd.DataFrame(row_dict)
dataframe['lesson_id'] = next(lesson_id_iter)
dataframes.append(dataframe)
except Exception as err:
print(err)
pass
dataframe_1 = dataframes[0]
for dataframe in dataframes[1:]:
dataframe_1 = pd.concat([dataframe_1, dataframe], axis=0)
final_merge = pd.merge(lessons, dataframe_1, left_on='id', right_on='lesson_id', how='inner')
final_merge['difficulty'] = [x.split()[1] if x != '' else np.NaN for x in final_merge['description']]
final_merge['subject'] = [x.split()[0] if x != '' else np.NaN for x in final_merge['description']]
return final_merge
# Create your views here.
def menu(request):
sql = """
SELECT *
FROM kolibriauth_facilityuser
"""
db_conn = sqlalchemy.create_engine('sqlite:///path\\to\\kolibri\\db.sqlite3')
local_db_conn = sqlalchemy.create_engine('sqlite:///db.sqlite3')
from sqlalchemy import MetaData
db_conn.connect()
request.session['user'] = []
facilusers = pd.read_sql(sql, db_conn)
users = [str(x) for x in facilusers['username']]
if local_db_conn.dialect.has_table(local_db_conn, "facilityuserstable"):
pass
else:
facilusers['survey'] = 0
facilusers.to_sql('facilityuserstable', local_db_conn, if_exists='replace')
if request.method == 'POST':
# if user == admin user
if request.POST['users'] == 'pn1eto':
request.session['user'] = request.POST['users']
return redirect('admin_dashboard/')
else:
request.session['user'] = request.POST['users']
print(facilusers)
messages.success(request, f'Hola, ahora estás en tu cuenta {str(request.POST["users"])}')
return redirect('dashboard/')
return render(request, 'menu.html', {'users': users, })
def dashboard(request):
localengine = sqlalchemy.create_engine('sqlite:///db.sqlite3')
sql = """
SELECT *
FROM facilityuserstable
"""
user_local = pd.read_sql(sql, localengine)
if int(user_local[user_local['username'] == str(request.session['user'])]['survey']) == 0:
return redirect('/survey')
else:
engine2 = sqlalchemy.create_engine('sqlite:///path\\to\\kolibri\\db.sqlite3')
sql = """
SELECT *
FROM kolibriauth_facilityuser
"""
facilusers = pd.read_sql(sql, engine2)
sql = """
SELECT *
FROM logger_contentsessionlog
"""
lessons_log = pd.read_sql(sql, engine2)
sql = """
SELECT *
FROM logger_contentsummarylog
"""
contentsummary = pd.read_sql(sql, engine2)
sql = """
SELECT *
FROM content_contentnode
where available = 1
"""
channelcont = pd.read_sql(sql, engine2)
sql = """
SELECT *
FROM logger_attemptlog
"""
attempts = pd.read_sql(sql, engine2)
sql = """
SELECT *
FROM lessons_lesson
"""
lessons = pd.read_sql(sql, engine2)
lessons = lessons_process(lessons)
lessons_meta = pd.merge(lessons_log, facilusers, right_on='id', left_on='user_id', how='inner')
lessons_meta = pd.merge(lessons_meta, channelcont, on='content_id', how='inner', )
lessons_meta = pd.merge(lessons_meta, contentsummary, on='content_id', how='inner', )
lessons_meta = pd.merge(lessons_meta, lessons, on='content_id', )
lessons_meta['video_loc'] = np.NaN
video_loc = [0 if x == '{}' else ast.literal_eval(x)['contentState']['savedLocation'] for x in
lessons_meta[lessons_meta['kind'] == 'video']['extra_fields_y']]
lessons_meta.loc[lessons_meta['kind'] == 'video', 'extra_fields_y'] = video_loc
materias = set([x for x in lessons_meta['subject'].dropna(axis=0)])
lessons_detailed = lessons.groupby('title').sum()
lessons_detailed = lessons_detailed.rename({'is_active': 'number_resources', }, axis='columns')
lessons_detailed = lessons_detailed.drop(['_morango_dirty_bit'], axis=1)
lessons_detailed = pd.merge(lessons_detailed,
lessons[['id', 'difficulty', 'subject', 'title']].drop_duplicates(subset='id'),
on='title'
, how='left')
# todo add user sorting
# todo remove changed or deleted lessons
lessons_meta = lessons_meta[lessons_meta.title_y != 'Segundo grado - Decenas y centenas']
lessons_meta_agg = lessons_meta.drop_duplicates(subset='id_y').groupby('title_y').sum()
lessons_detailed = | pd.merge(lessons_detailed, lessons_meta_agg, left_on='title', right_on='title_y', how='left') | pandas.merge |
import streamlit as st
import time
import pandas as pd
import numpy as np
import joblib
import PIL
from bokeh.models.widgets import Div
from sklearn.preprocessing import StandardScaler
import category_encoders as ce
import keras
import json
import nibabel as nib
import tensorflow as tf
from functions.utils import *
from functions.image_classification import *
from tensorflow.keras import backend as K
import base64
st.set_option('deprecation.showfileUploaderEncoding', False)
def main():
st.write('<style>div.Widget.row-widget.stRadio > div{flex-direction:row;}</style>', unsafe_allow_html=True)
activities = ["Home", "Stratifying Risks", "MRI Brain Tumor", "Breast Cancer", "Heart Disease", "Heart Monitor", "About"]
choice = st.sidebar.selectbox("Menu", activities)
# ============================== HOME ======================================================= #
if choice == "Home":
st.header("Hello Folks! \nI am your Doctor Health and my goal is to offer the best experience for you.\nI already have some functions that can be used to you monitoring your health.")
st.subheader("Features")
st.write("- Risk Stratification Using Electronic Health Records")
st.write("- Auto-Segmentation of Brain Tumor on Magnetic Resonance Imaging (MRI)")
st.write("- Detection of Breast Cancer Injuries")
st.write("- Heart Disease Prediction based on Age")
st.write("- Heart Monitoring")
image = PIL.Image.open("images/doctor-robot.png")
st.image(image,caption="")
# ============================== STRATIFYING RISKS ======================================================= #
if choice == "Stratifying Risks":
sub_activities = ["Predict"]
sub_choice = st.sidebar.selectbox("Action", sub_activities)
if sub_choice == "Predict":
df = getStratRiskFeatures()
uploaded_file = False
if st.checkbox('Want to upload data to predict?'):
uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
if st.button('Make predictions'):
if uploaded_file:
df = pd.read_csv(uploaded_file, low_memory=False)
data = feature_engineering(df)
st.write(data)
else:
data = feature_engineering(df)
st.write(data)
ce = joblib.load('fe_stratifying_risks/models/ce_leave.pkl')
scaler = joblib.load('fe_stratifying_risks/models/scaler.pkl')
model = joblib.load('fe_stratifying_risks/models/modelo_lr.pkl')
data = ce.transform(data)
data = scaler.transform(data)
pred = model.predict_proba(data)[:,1]
# Dataframe with the predicted probabilities (on test data)
df_proba = pd.DataFrame(pred, columns = ['Probabilidade'])
# Dataframe for the stratified risk
df_risco = | pd.DataFrame() | pandas.DataFrame |
# -*- coding:utf-8 -*-
"""
Fundamental data interface
Created on 2018/09/07
@author: <NAME>
@contact: <EMAIL>
"""
from aushare.stock import cons as ct
import urllib.request
import json
from bs4 import BeautifulSoup
import pandas as pd
from io import StringIO
import csv
import html.parser
import time
from datetime import datetime
import os
import re
def getAllASXListCode():
url = ct.ASXLIST_FILE
if os.path.isfile(ct.ASXLIST_FILE_NAME):
dataFile = ct.ASXLIST_FILE_NAME
else:
data = urllib.request.urlopen(url).read().decode('ascii','ignore')
dataFile = StringIO(data)
df =pd.read_csv(dataFile,header=1)
return df['ASX code']
def getASXListName(code=None):
url = ct.ASXLIST_FILE
if os.path.isfile(ct.ASXLIST_FILE_NAME):
dataFile = ct.ASXLIST_FILE_NAME
else:
data = urllib.request.urlopen(url).read().decode('ascii','ignore')
dataFile = StringIO(data)
df =pd.read_csv(dataFile,header=1)
if (code ==None):
print(df['Company name'])
return df['Company name']
else:
print(df['Company name'][df['ASX code']==code])
return (df['Company name'][df['ASX code']==code])
def getASXListIndustry(code=None):
url = ct.ASXLIST_FILE
if os.path.isfile(ct.ASXLIST_FILE_NAME):
dataFile = ct.ASXLIST_FILE_NAME
else:
data = urllib.request.urlopen(url).read().decode('ascii','ignore')
dataFile = StringIO(data)
df =pd.read_csv(dataFile,header=1)
if (code ==None):
return df['GICS industry group']
else:
return df['GICS industry group'][df['ASX code']==code]
def getCompanyBasicInfo(code=None):
url = ct.ASXLIST_FILE
if os.path.isfile(ct.ASXLIST_FILE_NAME):
dataFile = ct.ASXLIST_FILE_NAME
else:
data = urllib.request.urlopen(url).read().decode('ascii','ignore')
dataFile = StringIO(data)
df =pd.read_csv(dataFile,header=1)
if (code ==None):
print(df['GICS industry group','Company name'])
return df['GICS industry group','Company name']
else:
print(df[['GICS industry group','Company name']][df['ASX code']==code])
return df[['GICS industry group','Company name']][df['ASX code']==code]
#get income from annual report in yahoo finance
def getRevenueDiff(code='APT'):
file_name = ct.REVENUE_FILE%code
try:
if os.path.isfile(file_name):
df = pd.read_csv(file_name,header=0, index_col =0)
else:
urlbase = ct.INCOME_ANNUAL_REPORT
url = urlbase%(code,code)
print(url)
response = urllib.request.urlopen(url).read().decode('utf-8')
soup = BeautifulSoup(response, "lxml")
#tb = soup.find("table",attrs = {"data-reactid":"29"})
tb = soup.find("table")
df1 = pd.read_html(str(tb),header=0,index_col=0)
df =df1[0].T
df2 = | pd.to_numeric(df["Total Revenue"],downcast='float') | pandas.to_numeric |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
This example shows how to fit a model and evaluate its predictions.
"""
import pprint
from functools import partial
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import sys
sys.path.append('/scratch/project_2002244/benchmarks/packages') #
# %matplotlib inline
import mxnet as mx
from mxnet import gluon
import matplotlib.pyplot as plt
import json
import os
from tqdm.autonotebook import tqdm
from pathlib import Path
from gluonts.evaluation import Evaluator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.model.deepar import DeepAREstimator
# from gluonts.model.seq2seq import MQCNNEstimator
from gluonts.model.canonical import CanonicalRNNEstimator
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.model.gp_forecaster import GaussianProcessEstimator
from gluonts.model.lstnet import LSTNetEstimator
from gluonts.distribution.gaussian import GaussianOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.trainer import Trainer
from gluonts.dataset.common import ListDataset
from gluonts.dataset.field_names import FieldName
from gluonts.model.forecast import Config, OutputType
mx.random.seed(0)
np.random.seed(0)
def plot_prob_forecasts(ts_entry, forecast_entry, sample_id, prediction_length, plot_length, inline=True):
prediction_intervals = (50, 67, 95, 99)
legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
_, ax = plt.subplots(1, 1, figsize=(10, 7))
ts_entry[-plot_length:].plot(ax=ax)
forecast_entry.plot(prediction_intervals=prediction_intervals, color='g')
ax.axvline(ts_entry.index[-prediction_length], color='r')
plt.legend(legend, loc="upper left")
if inline:
plt.show()
plt.clf()
def get_custom_dataset(name, horizon):
"""
"""
if name=="electricity":
csv_path = r'/scratch/project_2002244/DeepAR/data/elect/electricity.csv'
df = pd.read_csv(csv_path, sep=",", index_col=0, parse_dates=True, decimal='.').astype(float)
df.fillna(0, inplace=True)
train_start = '2012-01-01 00:00:00'
train_end = '2014-05-26 23:00:00'
test_start = '2014-05-27 00:00:00'
test_end = '2014-12-31 23:00:00'
elif name=="europe_power_system":
csv_path = r'/scratch/project_2002244/DeepAR/data/elect/europe_power_system.csv'
df = pd.read_csv(csv_path, sep=",", index_col=0, parse_dates=True, decimal='.').astype(float)
df.fillna(0, inplace=True)
train_start = '2015-01-01 00:00:00'
train_end = '2017-06-23 23:00:00'
test_start = '2017-06-24 00:00:00'
test_end = '2017-11-30 23:00:00'
train_target_values = df[:train_end].T.values
test_target_values = df[:(pd.Timestamp(test_start)-timedelta(hours=1))].T.values
start_dates = np.array([pd.Timestamp(df.index[0], freq='1H') for _ in range(train_target_values.shape[0])])
train_ds = ListDataset([
{
FieldName.TARGET: target,
FieldName.START: start
}
for (target, start) in zip(train_target_values, start_dates)
], freq="1H")
test_ds = ListDataset([
{
FieldName.TARGET: target,
FieldName.START: start
}
for index in pd.date_range(start=( | pd.Timestamp(test_start) | pandas.Timestamp |
from nwb_conversion_tools.basedatainterface import BaseDataInterface
from nwb_conversion_tools.utils import get_schema_from_hdmf_class
from nwb_conversion_tools.json_schema_utils import get_base_schema
from pynwb import NWBFile, TimeSeries
from pynwb.device import Device
from pynwb.ogen import OptogeneticStimulusSite, OptogeneticSeries
from datetime import datetime, timedelta
from pathlib import Path
import pytz
import pandas as pd
import os
class LabviewDataInterface(BaseDataInterface):
"""Conversion class for Labview data."""
@classmethod
def get_source_schema(cls):
"""Return a partial JSON schema indicating the input arguments and their types."""
source_schema = super().get_source_schema()
source_schema.update(
required=[],
properties=dict(
dir_behavior_labview=dict(
type="string",
format="directory",
description="path to directory containing behavioral data"
)
)
)
return source_schema
def get_metadata_schema(self):
metadata_schema = super().get_metadata_schema()
# Ogen metadata schema
metadata_schema['properties']['Ogen'] = get_base_schema()
metadata_schema['properties']['Ogen']['properties'] = dict(
Device=get_schema_from_hdmf_class(Device),
OptogeneticStimulusSite=get_schema_from_hdmf_class(OptogeneticStimulusSite),
OptogeneticSeries=get_schema_from_hdmf_class(OptogeneticSeries)
)
return metadata_schema
def get_metadata(self):
# Get list of trial summary files
dir_behavior_labview = self.source_data['dir_behavior_labview']
all_files = os.listdir(dir_behavior_labview)
trials_files = [f for f in all_files if '_sum.txt' in f]
trials_files.sort()
# Get session_start_time from first file timestamps
labview_time_offset = datetime.strptime('01/01/1904 00:00:00', '%m/%d/%Y %H:%M:%S') # LabView timestamps offset
fpath = os.path.join(dir_behavior_labview, trials_files[0])
colnames = ['Trial', 'StartT', 'EndT', 'Result', 'InitT', 'SpecificResults',
'ProbLeft', 'OptoDur', 'LRew', 'RRew', 'InterT', 'LTrial',
'ReactionTime', 'OptoCond', 'OptoTrial']
df_0 = pd.read_csv(fpath, sep='\t', index_col=False, names=colnames)
t0 = df_0['StartT'][0] # initial time in Labview seconds
session_start_time = labview_time_offset + timedelta(seconds=t0)
session_start_time_tzaware = pytz.timezone('EST').localize(session_start_time)
metadata = dict(
NWBFile=dict(
session_start_time=session_start_time_tzaware.isoformat()
)
)
return metadata
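# Note: LabVIEW timestamps count seconds since 1904-01-01 00:00:00, hence the offset above.
# Illustrative conversion (not part of the conversion flow; the value is made up):
# labview_seconds = 3.6e9
# datetime.strptime('01/01/1904 00:00:00', '%m/%d/%Y %H:%M:%S') + timedelta(seconds=labview_seconds)
# -> a datetime in 2018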
def run_conversion(self, nwbfile: NWBFile, metadata: dict):
"""
Run conversion for this data interface.
Reads labview experiment behavioral data and adds it to nwbfile.
Parameters
----------
nwbfile : NWBFile
metadata : dict
"""
print("Converting Labview data...")
# Get list of trial summary files
dir_behavior_labview = self.source_data['dir_behavior_labview']
all_files = os.listdir(dir_behavior_labview)
trials_files = [f for f in all_files if '_sum.txt' in f]
trials_files.sort()
# Get session_start_time from first file timestamps
fpath = os.path.join(dir_behavior_labview, trials_files[0])
colnames = ['Trial', 'StartT', 'EndT', 'Result', 'InitT', 'SpecificResults',
'ProbLeft', 'OptoDur', 'LRew', 'RRew', 'InterT', 'LTrial',
'ReactionTime', 'OptoCond', 'OptoTrial']
df_0 = pd.read_csv(fpath, sep='\t', index_col=False, names=colnames)
t0 = df_0['StartT'][0] # initial time in Labview seconds
# Add trials
print("Converting Labview trials data...")
if nwbfile.trials is not None:
print('Trials already exist in current nwb file. Labview behavior trials not added.')
else:
# Make dataframe
frames = []
for f in trials_files:
fpath = os.path.join(dir_behavior_labview, f)
frames.append(pd.read_csv(fpath, sep='\t', index_col=False, names=colnames))
df_trials_summary = pd.concat(frames)
nwbfile.add_trial_column(
name='results',
description="0 means sucess (rewarded trial), 1 means licks during intitial "
"period, which leads to a failed trial. 2 means early lick failure. 3 means "
"wrong lick or no response."
)
nwbfile.add_trial_column(
name='init_t',
description="duration of initial delay period."
)
nwbfile.add_trial_column(
name='specific_results',
description="Possible outcomes classified based on raw data & meta file (_tr.m)."
)
nwbfile.add_trial_column(
name='prob_left',
description="probability for left trials in order to keep the number of "
"left and right trials balanced within the session. "
)
nwbfile.add_trial_column(
name='opto_dur',
description="the duration of optical stimulation."
)
nwbfile.add_trial_column(
name='l_rew_n',
description="counting the number of left rewards."
)
nwbfile.add_trial_column(
name='r_rew_n',
description="counting the number of rightrewards."
)
nwbfile.add_trial_column(
name='inter_t',
description="inter-trial delay period."
)
nwbfile.add_trial_column(
name='l_trial',
description="trial type (which side the air-puff is applied). 1 means "
"left-trial, 0 means right-trial"
)
nwbfile.add_trial_column(
name='reaction_time',
description="if it is a successful trial or wrong lick during response "
"period trial: ReactionTime = time between the first decision "
"lick and the beginning of the response period. If it is a failed "
"trial due to early licks: reaction time = the duration of "
"the air-puff period (in other words, when the animal licks "
"during the sample period)."
)
nwbfile.add_trial_column(
name='opto_cond',
description="0: no opto. 1: opto is on during sample period. "
"2: opto is on half way through the sample period (0.5s) "
"and 0.5 during the response period. 3. opto is on during "
"the response period."
)
nwbfile.add_trial_column(
name='opto_trial',
description="1: opto trials. 0: Non-opto trials."
)
for index, row in df_trials_summary.iterrows():
nwbfile.add_trial(
start_time=row['StartT'] - t0,
stop_time=row['EndT'] - t0,
results=int(row['Result']),
init_t=row['InitT'],
specific_results=int(row['SpecificResults']),
prob_left=row['ProbLeft'],
opto_dur=row['OptoDur'],
l_rew_n=int(row['LRew']),
r_rew_n=int(row['RRew']),
inter_t=row['InterT'],
l_trial=int(row['LTrial']),
reaction_time=int(row['ReactionTime']),
opto_cond=int(row['OptoCond']),
opto_trial=int(row['OptoTrial']),
)
# Get list of files: continuous data
continuous_files = [f.replace('_sum', '') for f in trials_files]
# Adds continuous behavioral data
frames = []
for f in continuous_files:
fpath_lick = os.path.join(dir_behavior_labview, f)
frames.append(pd.read_csv(fpath_lick, sep='\t', index_col=False))
df_continuous = | pd.concat(frames) | pandas.concat |
from os import path
import albumentations
import numpy as np
import pandas as pd
from PIL import Image, ImageOps
from skimage.feature import hog
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from tqdm import tqdm
from segmenter.symbol_segmenter import segment_image
from utils.image import img_to_binary
def generate_image_augmentation(image, count):
transform = albumentations.Compose([
albumentations.RandomScale(),
albumentations.Rotate(limit=(-15, 15)),
albumentations.Blur(blur_limit=5),
])
return [transform(image=image)['image'] for _ in range(count)]
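# Usage sketch (illustrative; 'sample.png' is a hypothetical file and 'img' any HxWxC uint8 array):
# img = np.array(Image.open('sample.png'))
# augmented = generate_image_augmentation(img, count=5)
# len(augmented) == 5   # five randomly scaled/rotated/blurred variants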
def generate_features_dataset(classification_dataset_dir, augmentation_cont, progress=False):
"""
Parse the dataset in the input directory and extract `hog` features from the images; if the dataset is small,
it also adds more samples using augmentation
"""
if not path.exists(classification_dataset_dir):
raise FileNotFoundError(classification_dataset_dir)
if not path.isdir(classification_dataset_dir):
raise Exception(f"{classification_dataset_dir} is found but it is a file and not a directory")
tqdm_reading_images = None
tqdm_augmentation = None
tqdm_preprocessing = None
tqdm_feature_extraction = None
metadata_file = path.join(classification_dataset_dir, 'metadata.csv')
if not path.isfile(metadata_file):
raise FileNotFoundError(metadata_file)
# read the dataset
dataset = | pd.read_csv(metadata_file, header=None, dtype=str) | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pandas as pd
from astropy.coordinates import SkyCoord
from astropy.io.votable import parse
from sh import bzip2
from ...lib.context_managers import cd
# =============================================================================
# CONSTANTS
# =============================================================================
PATH = os.path.abspath(os.path.dirname(__file__))
CATALOG_PATH = os.path.join(PATH, "carpyncho_catalog.pkl")
# =============================================================================
# BUILD
# =============================================================================
def get_ogle_3_resume():
with cd(PATH):
bzip2("-f", "-dk", "ogleIII_all.csv.bz2")
df = pd.read_csv("ogleIII_all.csv")
ra = df["RA"].apply(
lambda d: d.replace(":", "h", 1).replace(":", "m", 1) + "s")
dec = df["Decl"].apply(
lambda d: d.replace(":", "d", 1).replace(":", "m", 1) + "s")
coords = SkyCoord(ra, dec, frame='icrs')
df['ra'] = pd.Series(coords.ra.deg, index=df.index)
df['dec'] = pd.Series(coords.dec.deg, index=df.index)
df["cls"] = df["Type"] + "-" + df["Subtype"]
df = df[["ID", "ra", "dec", "cls"]]
df["catalog"] = pd.Series("OGLE-3", index=df.index)
os.remove("ogleIII_all.csv")
return df
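# Sketch of the sexagesimal-string conversion done above (illustrative value):
# '17:51:03.9' -> '17h51m03.9s' for RA and '-29:53:19' -> '-29d53m19s' for Dec,
# which SkyCoord then parses into degrees:
# SkyCoord('17h51m03.9s', '-29d53m19s', frame='icrs').ra.deg   # ~267.77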
def get_ogle_4_resume():
with cd(PATH):
bzip2("-f", "-dk", "ogle4.csv.bz2")
df = pd.read_csv("ogle4.csv")
def _ra(d):
d = d.replace(":", "h", 1).replace(":", "m", 1)
return d.replace(":", ".") + "s"
ra = df["ra"].apply(_ra)
def _dec(d):
d = d.replace(":", "d", 1).replace(":", "m", 1)
return d.replace(":", ".") + "s"
dec = df["dec"].apply(_dec)
coords = SkyCoord(ra, dec, frame='icrs')
df['ra'] = pd.Series(coords.ra.deg, index=df.index)
df['dec'] = | pd.Series(coords.dec.deg, index=df.index) | pandas.Series |
"""local_mongo_sync_beta_app"""
#code='local_mongo_sync_beta_app'
#mongo_string='mongodb://root:[email protected]:27017/?authSource=admin&readPreference=primary&appname=MongoDB%20Compass&ssl=false'
#es_host = 'localhost:9200'
#from elasticsearch import Elasticsearch
#es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
#auth={'apiKey': '135BE3F9-B1D8-4A14-A999-303286C32275'}
"""local_mongo_sync_prod_app"""
#code='local_mongo_sync_prod_app'
#mongo_string='mongodb://root:[email protected]:27017/admin?authSource=admin&readPreference=primary&appname=MongoDB%20Compass%20Community&ssl=false'
#es_host = 'localhost:9200'
#from elasticsearch import Elasticsearch
#es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
#auth={'apiKey': '135BE3F9-B1D8-4A14-A999-303286C32275'}
"""dev_env_mongo_sync_beta_app"""
code='dev_env_mongo_sync_beta_app'
mongo_string='mongodb://root:[email protected]:27017/?authSource=admin&readPreference=primary&appname=MongoDB%20Compass&ssl=false'
es_host = 'elastic-helm-elasticsearch-coordinating-only'
from elasticsearch import Elasticsearch
es = Elasticsearch([{'host': 'elastic-helm-elasticsearch-coordinating-only'}])
auth={'apiKey': '135BE3F9-B1D8-4A14-A999-303286C32275'}
#
"""beta_env_mongo_sync_prod_app"""
#code='beta_env_mongo_sync_prod_app'
#mongo_string='mongodb://root:[email protected]:27017/admin?authSource=admin&readPreference=primary&appname=MongoDB%20Compass%20Community&ssl=false'
#es_host = 'elastic-helm-elasticsearch-coordinating-only'
#from elasticsearch import Elasticsearch
#es = Elasticsearch([{'host': 'elastic-helm-elasticsearch-coordinating-only'}])
#auth={'apiKey': '135BE3F9-B1D8-4A14-A999-303286C32275'}
"""prod_env_mongo_sync_prod_app"""
#code='prod_env_mongo_sync_prod_app'
#mongo_string='mongodb://root:eXVB5mbmuZ@bighaat-mongo-mongodb-0.bighaat-mongo-mongodb-headless.kisanvedika.svc.cluster.local:27017,bighaat-mongo-mongodb-1.bighaat-mongo-mongodb-headless.kisanvedika.svc.cluster.local:27017,bighaat-mongo-mongodb-2.bighaat-mongo-mongodb-headless.kisanvedika.svc.cluster.local:27017/admin?authSource=admin&compressors=disabled&gssapiServiceName=mongodb'
#es_host = 'elastic-helm-elasticsearch-coordinating-only'
#from elasticsearch import Elasticsearch
#es = Elasticsearch([{'host': 'elastic-helm-elasticsearch-coordinating-only'}])
#auth={'apiKey': '135BE3F9-B1D8-4A14-A999-303286C32275'}
from pymongo import MongoClient
import datetime
import time
from elasticsearch import Elasticsearch
from bson.objectid import ObjectId
from elasticsearch.helpers import scan
#es = Elasticsearch([{'host': 'elastic-helm-elasticsearch-coordinating-only'}])
#es = Elasticsearch("elastic-helm-elasticsearch-coordinating-only")
#es = Elasticsearch("https://elastic:[email protected]:9200/", verify_certs=False, connection_class=RequestsHttpConnection)
#from pandas.io.json import json_normalize
import pandas as pd
import re
import numpy as np
from es_pandas import es_pandas
import warnings
warnings.filterwarnings("ignore")
#es_host = 'elastic-helm-elasticsearch-coordinating-only'
mongo_client = MongoClient(mongo_string)
db = mongo_client.bighaat
import json
import requests
def big_read(index):
es_response = scan(
es,
index=index,
doc_type='_doc',
query={"query": { "match_all" : {}}}
)
return es_response
def logger(message):
message_dict={'code':code,
'message':message}
r=requests.post('https://apibeta.bighaat.com/crop/api/logerror/create-error-log?message={}&api-version=1.0'.format(str(message_dict)),headers=auth)
return r.text
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, ObjectId):
return str(o)
if isinstance(o, datetime.datetime):
return str(o)
return json.JSONEncoder.default(self, o)
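# Usage sketch: the custom encoder lets Mongo documents (which contain ObjectId and
# datetime values) be serialised before indexing into Elasticsearch, e.g.:
# doc = {'_id': ObjectId(), 'createdAt': datetime.datetime.utcnow()}
# JSONEncoder().encode(doc)   # '{"_id": "...", "createdAt": "..."}'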
#try:
# start_time = time.time()
# db = mongo_client.bighaat
# col = db.posts
# cursor = col.find()
# posts = list(cursor)
# status=[]
# for node in posts:
# _id = str(node['_id']).split("'")[0]
# node.pop('_id')
# res=es.index(index='posts_sync',id=_id,body=JSONEncoder().encode(node))
# status.append(res['_id'])
# print("Posts--- %s seconds ---" % (time.time() - start_time_posts))
# logger("Posts--- %s seconds ---" % (time.time() - start_time_posts))
#
# col = db.comments
# cursor = col.find()
# comments = list(cursor)
# status=[]
# for node in comments:
# _id = str(node['_id']).split("'")[0]
# node.pop('_id')
# res=es.index(index='comments_sync',id=_id,body=JSONEncoder().encode(node))
# status.append(res['_id'])
#
# col = db.crops
# cursor = col.find()
# crops = list(cursor)
# status=[]
# for node in crops:
# _id = str(node['_id']).split("'")[0]
# node.pop('_id')
# res=es.index(index='crops_sync',id=_id,body=JSONEncoder().encode(node))
# status.append(res['_id'])
#
# col = db.crop_doctor
# cursor = col.find()
# crops = list(cursor)
# status=[]
# for node in crops:
# _id = str(node['_id']).split("'")[0]
# node.pop('_id')
# res=es.index(index='crop_doc_sync',id=_id,body=JSONEncoder().encode(node))
# status.append(res['_id'])
# print('Done')
# print("--- %s Mongo Init seconds ---" % (time.time() - start_time))
# logger("--- %s Mongo Init seconds ---" % (time.time() - start_time))
#except Exception as e:
# logger('Error in Mongo Init '+str(e))
# print('Error in Mongo Init '+str(e))
while(1):
try:
start_time = time.time()
start_time_posts = time.time()
col = db.posts
cursor = col.find()
posts = list(cursor)
print(time.time()-start_time_posts)
posts_list=[]
for node in posts:
pass
posts_list.append(json.loads(JSONEncoder().encode(node)))
posts=pd.DataFrame(posts_list)
print("Posts--- %s seconds ---" % (time.time() - start_time_posts))
message="Posts--- %s seconds ---" % (time.time() - start_time_posts)
logger(message)
start_time_users = time.time()
col = db.users
cursor = col.find()
users = list(cursor)
print(time.time()-start_time_users)
users_list=[]
for node in users:
pass
users_list.append(json.loads(JSONEncoder().encode(node)))
users=pd.DataFrame(users_list)
print("Users--- %s seconds ---" % (time.time() - start_time_users))
message="Users--- %s seconds ---" % (time.time() - start_time_users)
logger(message)
start_time_comments = time.time()
col = db.comments
cursor = col.find()
comments = list(cursor)
print(time.time()-start_time_comments)
comments_list=[]
for node in comments:
pass
comments_list.append(json.loads(JSONEncoder().encode(node)))
comments=pd.DataFrame(comments_list)
comments_df=comments.set_index('_id')[['postId','userId','comment','isDeleted']]
index = 'comments_sync'
doc_type = 'vec'
ep = es_pandas(es_host)
ep.init_es_tmpl(comments_df, doc_type)
ep.to_es(comments_df, index, doc_type=doc_type, use_index=True)
print("Comments--- %s seconds ---" % (time.time() - start_time_comments))
message="Comments--- %s seconds ---" % (time.time() - start_time_comments)
logger(message)
start_time_crop_doctor = time.time()
col = db.crop_doctor
cursor = col.find()
crop_doctor = list(cursor)
print(time.time()-start_time_crop_doctor)
crop_doctor_list=[]
for node in crop_doctor:
pass
crop_doctor_list.append(json.loads(JSONEncoder().encode(node)))
crop_doctor=pd.DataFrame(crop_doctor_list)
print("Crop_doctor--- %s seconds ---" % (time.time() - start_time_crop_doctor))
message="Crop_doctor--- %s seconds ---" % (time.time() - start_time_crop_doctor)
logger(message)
start_time_crops = time.time()
col = db.crops
cursor = col.find()
crops = list(cursor)
print(time.time()-start_time_crops)
crops_list=[]
for node in crops:
pass
crops_list.append(json.loads(JSONEncoder().encode(node)))
crops= | pd.DataFrame(crops_list) | pandas.DataFrame |
""""""
__author__ = "<NAME>"
__copyright__ = "WeatherBrain"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import pandas
def load_temperature_raw():
"""This methid loads the raw temperature data from
text files and recompiled the data into a dataframe.
:return: Dataframe with temperature data.
"""
# with open(r'Data\\Raw Data\\Temperature 1756-1858.txt') as f:
# data = f.readlines()
# with open(r'Data\\Raw Data\\Temperature 1859-1960.txt') as f:
# data += f.readlines()
# with open(r'Data\\Raw Data\\Temperature 1961-2012.txt') as f:
# data += f.readlines()
# with open(r'Data\\Raw Data\\Temperature 2013-2017.txt') as f:
# data += f.readlines()
with open(r'..\\Data\\Raw Data\\Non-homogenized SLP series in hPa.txt') as f:
data = f.readlines()
with open(r'..\\Data\\Raw Data\\2013-2017, hPa, automatic station.txt') as f:
data += f.readlines()
data_length = len(data)
result = []
for data_index, row in enumerate(data):
row = row.replace('\n', '').split(' ')
row = [x for x in row if x != '']
date = | pandas.to_datetime(row[0] + '-' + row[1] + '-' + row[2]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 31 23:45:55 2021
@author: dv516
"""
import numpy as np
import pickle
import pyro
pyro.enable_validation(True) # can help with debugging
pyro.set_rng_seed(1)
from algorithms.PyBobyqa_wrapped.Wrapper_for_pybobyqa import PyBobyqaWrapper
from algorithms.Bayesian_opt_Pyro.utilities_full import BayesOpt
from algorithms.nesterov_random.nesterov_random import nesterov_random
from algorithms.simplex.simplex_method import simplex_method
from algorithms.CUATRO.CUATRO import CUATRO
from algorithms.Finite_differences.Finite_differences import finite_Diff_Newton
from algorithms.Finite_differences.Finite_differences import Adam_optimizer
from algorithms.Finite_differences.Finite_differences import BFGS_optimizer
from algorithms.SQSnobfit_wrapped.Wrapper_for_SQSnobfit import SQSnobFitWrapper
from algorithms.DIRECT_wrapped.Wrapper_for_Direct import DIRECTWrapper
from case_studies.RTO.systems import *
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
import pandas as pd
import pickle
def average_from_list(solutions_list):
N = len(solutions_list)
f_best_all = np.zeros((N, 100))
for i in range(N):
f_best = np.array(solutions_list[i]['f_best_so_far'])
x_ind = np.array(solutions_list[i]['samples_at_iteration'])
for j in range(100):
ind = np.where(x_ind <= j+1)
if len(ind[0]) == 0:
f_best_all[i, j] = f_best[0]
else:
f_best_all[i, j] = f_best[ind][-1]
f_median = np.median(f_best_all, axis = 0)
# f_av = np.average(f_best_all, axis = 0)
# f_std = np.std(f_best_all, axis = 0)
f_min = np.min(f_best_all, axis = 0)
f_max = np.max(f_best_all, axis = 0)
return f_best_all, f_median, f_min, f_max
def RTO_Noise(x, noise, N_SAA):
plant = WO_system()
f = plant.WO_obj_sys_ca
g1 = plant.WO_con1_sys_ca
g2 = plant.WO_con2_sys_ca
f_SAA = 0
g1_SAA, g2_SAA = - np.inf, - np.inf
for i in range(N_SAA):
f_SAA += (f(x) + 5e-1 * np.random.normal(0., noise))/N_SAA
g1_SAA = max(g1_SAA, g1(x) + 5e-4 * np.random.normal(0., noise))
g2_SAA = max(g2_SAA, g2(x) + 5e-4 * np.random.normal(0., noise))
return f_SAA, [g1_SAA, g2_SAA]
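# Illustrative call (a sketch; WO_system comes from case_studies.RTO.systems): with
# N_SAA > 1 the objective is a sample average over noisy evaluations, while each
# constraint keeps its worst (largest) sampled value.
# f_val, (g1_val, g2_val) = RTO_Noise([6.9, 83], noise=1.0, N_SAA=2)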
n_noise = 6
noise_mat = np.zeros(n_noise)
for i in range(n_noise):
noise_mat[i] = 1/3*i
x0 = [6.9, 83]
bounds = np.array([[4., 7.], [70., 100.]])
max_f_eval = 50 ; N_SAA = 1
# max_f_eval = 25 ; N_SAA = 2
max_it = 100
#CUATRO local, CUATRO global, SQSnobFit, Bayes
N_samples = 20
RTONoise_list_SQSF = []
RTOConstraint_list_SQSF = []
for i in range(n_noise):
print('Iteration ', i+1, ' of SQSnobfit')
best = []
best_constr = []
for j in range(N_samples):
f = lambda x: RTO_Noise(x, noise_mat[i], N_SAA)
sol = SQSnobFitWrapper().solve(f, x0, bounds, mu_con = 1e6, \
maxfun = max_f_eval, constraints=2)
best.append(sol['f_best_so_far'][-1])
_, g = RTO_Noise(sol['x_best_so_far'][-1], 0, N_SAA)
best_constr.append(np.sum(np.maximum(g, 0)))
RTONoise_list_SQSF.append(best)
RTOConstraint_list_SQSF.append(best_constr)
# N_SAA = 1
N_samples = 20
RTONoise_list_CUATROl = []
RTOConstraint_list_CUATROl = []
for i in range(n_noise):
print('Iteration ', i+1, ' of CUATRO_l')
best = []
best_constr = []
for j in range(N_samples):
f = lambda x: RTO_Noise(x, noise_mat[i], N_SAA)
sol = CUATRO(f, x0, 2, bounds = bounds, max_f_eval = max_f_eval, \
N_min_samples = 6, tolerance = 1e-10,\
beta_red = 0.9, rnd = j, method = 'local', \
constr_handling = 'Fitting')
best.append(sol['f_best_so_far'][-1])
_, g = RTO_Noise(sol['x_best_so_far'][-1], 0, N_SAA)
best_constr.append(np.sum(np.maximum(g, 0)))
RTONoise_list_CUATROl.append(best)
RTOConstraint_list_CUATROl.append(best_constr)
# N_SAA = 1
N_samples = 20
RTONoise_list_CUATROg = []
RTOConstraint_list_CUATROg = []
init_radius = 10
for i in range(n_noise):
print('Iteration ', i+1, ' of CUATRO_g')
best = []
best_constr = []
for j in range(N_samples):
f = lambda x: RTO_Noise(x, noise_mat[i], N_SAA)
sol = CUATRO(f, x0, init_radius, bounds = bounds, max_f_eval = max_f_eval, \
N_min_samples = 15, tolerance = 1e-10,\
beta_red = 0.9, rnd = j, method = 'global', \
constr_handling = 'Discrimination')
best.append(sol['f_best_so_far'][-1])
_, g = RTO_Noise(sol['x_best_so_far'][-1], 0, N_SAA)
best_constr.append(np.sum(np.maximum(g, 0)))
RTONoise_list_CUATROg.append(best)
RTOConstraint_list_CUATROg.append(best_constr)
with open('BayesRTO_listNoiseConv.pickle', 'rb') as handle:
RTONoise_list_Bayes = pickle.load(handle)
with open('BayesRTO_listNoiseConstr.pickle', 'rb') as handle:
RTOConstraint_list_Bayes = pickle.load(handle)
noise = ['%.3f' % noise_mat[i] for i in range(n_noise)]
noise_labels = [[noise[i]]*N_samples for i in range(n_noise)]
convergence = list(itertools.chain(*RTONoise_list_SQSF)) + \
list(itertools.chain(*RTONoise_list_CUATROl)) + \
list(itertools.chain(*RTONoise_list_CUATROg)) + \
list(itertools.chain(*RTONoise_list_Bayes))
constraints = list(itertools.chain(*RTOConstraint_list_SQSF)) + \
list(itertools.chain(*RTOConstraint_list_CUATROl)) + \
list(itertools.chain(*RTOConstraint_list_CUATROg)) + \
list(itertools.chain(*RTOConstraint_list_Bayes))
noise = list(itertools.chain(*noise_labels))*4
method = ['Snobfit']*int(len(noise)/4) + ['CUATRO_l']*int(len(noise)/4) + \
['CUATRO_g']*int(len(noise)/4) + ['Bayes. Opt.']*int(len(noise)/4)
data = {'Best function evaluation': convergence, \
"Constraint violation": constraints, \
"Noise standard deviation": noise, \
'Method': method}
df = pd.DataFrame(data)
plt.rcParams["font.family"] = "Times New Roman"
ft = int(15)
font = {'size': ft}
plt.rc('font', **font)
params = {'legend.fontsize': 12.5,
'legend.handlelength': 1.2}
plt.rcParams.update(params)
ax = sns.boxplot(x = "Noise standard deviation", y = "Best function evaluation", hue = "Method", data = df, palette = "muted")
# plt.legend(bbox_to_anchor=(1.04,1), loc="upper left")
plt.legend([])
# plt.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
# mode="expand", borderaxespad=0, ncol=4)
plt.tight_layout()
plt.savefig('Publication plots/RTO_feval50Convergence.svg', format = "svg")
plt.show()
# ax.set_ylim([0.1, 10])
# ax.set_yscale("log")
plt.clf()
min_list = np.array([np.min([np.min(RTONoise_list_SQSF[i]),
np.min(RTONoise_list_CUATROl[i]),
np.min(RTONoise_list_CUATROg[i]),
np.min(RTONoise_list_Bayes[i])]) for i in range(n_noise)])
convergence_test = list(itertools.chain(*np.array(RTONoise_list_SQSF) - min_list.reshape(6,1))) + \
list(itertools.chain(*np.array(RTONoise_list_CUATROl) - min_list.reshape(6,1))) + \
list(itertools.chain(*np.array(RTONoise_list_CUATROg) - min_list.reshape(6,1))) + \
list(itertools.chain(*np.array(RTONoise_list_Bayes) - min_list.reshape(6,1)))
data_test = {'Best function evaluation': convergence_test, \
"Constraint violation": constraints, \
"Noise standard deviation": noise, \
'Method': method}
df_test = pd.DataFrame(data_test)
ax = sns.boxplot(x = "Noise standard deviation", y = 'Best function evaluation', hue = "Method", data = df_test, palette = "muted")
# plt.legend(bbox_to_anchor=(1.04,1), loc="upper left")
# plt.legend([])
plt.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
mode="expand", borderaxespad=0, ncol=4)
plt.tight_layout()
plt.ylabel(r'$f_{best, sample}$ - $f_{opt, noise}$')
plt.savefig('Publication plots/RTO_feval50ConvergenceLabel.svg', format = "svg")
plt.show()
plt.clf()
ax = sns.boxplot(x = "Noise standard deviation", y = "Constraint violation", \
hue = "Method", data = df, palette = "muted", fliersize = 0)
ax = sns.stripplot(x = "Noise standard deviation", y = "Constraint violation", \
hue = "Method", data = df, palette = "muted", dodge = True)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left")
plt.tight_layout()
plt.savefig('Publication plots/RTO_feval50Constraints.svg', format = "svg")
plt.show()
plt.clf()
max_f_eval = 25 ; N_SAA = 2
max_it = 100
#CUATRO local, CUATRO global, SQSnobFit, Bayes
N_samples = 20
RTOSAANoise_list_SQSF = []
RTOSAAConstraint_list_SQSF = []
for i in range(n_noise):
print('Iteration ', i+1, ' of SQSnobfit')
best = []
best_constr = []
for j in range(N_samples):
f = lambda x: RTO_Noise(x, noise_mat[i], N_SAA)
sol = SQSnobFitWrapper().solve(f, x0, bounds, mu_con = 1e6, \
maxfun = max_f_eval, constraints=2)
best.append(sol['f_best_so_far'][-1])
_, g = RTO_Noise(sol['x_best_so_far'][-1], 0, N_SAA)
best_constr.append(np.sum(np.maximum(g, 0)))
RTOSAANoise_list_SQSF.append(best)
RTOSAAConstraint_list_SQSF.append(best_constr)
# N_SAA = 1
N_samples = 20
RTOSAANoise_list_CUATROl = []
RTOSAAConstraint_list_CUATROl = []
for i in range(n_noise):
print('Iteration ', i+1, ' of CUATRO_l')
best = []
best_constr = []
for j in range(N_samples):
f = lambda x: RTO_Noise(x, noise_mat[i], N_SAA)
sol = CUATRO(f, x0, 2, bounds = bounds, max_f_eval = max_f_eval, \
N_min_samples = 6, tolerance = 1e-10,\
beta_red = 0.9, rnd = j, method = 'local', \
constr_handling = 'Fitting')
best.append(sol['f_best_so_far'][-1])
_, g = RTO_Noise(sol['x_best_so_far'][-1], 0, N_SAA)
best_constr.append(np.sum(np.maximum(g, 0)))
RTOSAANoise_list_CUATROl.append(best)
RTOSAAConstraint_list_CUATROl.append(best_constr)
# N_SAA = 1
N_samples = 20
RTOSAANoise_list_CUATROg = []
RTOSAAConstraint_list_CUATROg = []
init_radius = 10
for i in range(n_noise):
print('Iteration ', i+1, ' of CUATRO_g')
best = []
best_constr = []
for j in range(N_samples):
f = lambda x: RTO_Noise(x, noise_mat[i], N_SAA)
sol = CUATRO(f, x0, init_radius, bounds = bounds, max_f_eval = max_f_eval, \
N_min_samples = 15, tolerance = 1e-10,\
beta_red = 0.9, rnd = j, method = 'global', \
constr_handling = 'Discrimination')
best.append(sol['f_best_so_far'][-1])
_, g = RTO_Noise(sol['x_best_so_far'][-1], 0, N_SAA)
best_constr.append(np.sum(np.maximum(g, 0)))
RTOSAANoise_list_CUATROg.append(best)
RTOSAAConstraint_list_CUATROg.append(best_constr)
with open('BayesRTO_listNoiseConvSAA.pickle', 'rb') as handle:
RTOSAANoise_list_Bayes = pickle.load(handle)
with open('BayesRTO_listNoiseConstrSAA.pickle', 'rb') as handle:
RTOSAAConstraint_list_Bayes = pickle.load(handle)
noise = ['%.3f' % noise_mat[i] for i in range(n_noise)]
noise_labels = [[noise[i]]*N_samples for i in range(n_noise)]
convergence = list(itertools.chain(*RTOSAANoise_list_SQSF)) + \
list(itertools.chain(*RTOSAANoise_list_CUATROl)) + \
list(itertools.chain(*RTOSAANoise_list_CUATROg)) + \
list(itertools.chain(*RTOSAANoise_list_Bayes))
constraints = list(itertools.chain(*RTOSAAConstraint_list_SQSF)) + \
list(itertools.chain(*RTOSAAConstraint_list_CUATROl)) + \
list(itertools.chain(*RTOSAAConstraint_list_CUATROg)) + \
list(itertools.chain(*RTOSAAConstraint_list_Bayes))
noise = list(itertools.chain(*noise_labels))*4
method = ['Snobfit']*int(len(noise)/4) + ['CUATRO_l']*int(len(noise)/4) + \
['CUATRO_g']*int(len(noise)/4) + ['Bayes. Opt.']*int(len(noise)/4)
data = {'Best function evaluation': convergence, \
"Constraint violation": constraints, \
"Noise standard deviation": noise, \
'Method': method}
df = pd.DataFrame(data)
plt.rcParams["font.family"] = "Times New Roman"
ft = int(15)
font = {'size': ft}
plt.rc('font', **font)
params = {'legend.fontsize': 12.5,
'legend.handlelength': 1.2}
plt.rcParams.update(params)
ax = sns.boxplot(x = "Noise standard deviation", y = "Best function evaluation", hue = "Method", data = df, palette = "muted")
# plt.legend(bbox_to_anchor=(1.04,1), loc="upper left")
plt.legend([])
# plt.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
# mode="expand", borderaxespad=0, ncol=4)
plt.tight_layout()
plt.savefig('Publication plots/RTO_SAA2feval25Convergence.svg', format = "svg")
plt.show()
# ax.set_ylim([0.1, 10])
# ax.set_yscale("log")
plt.clf()
min_list = np.array([np.min([np.min(RTOSAANoise_list_SQSF[i]),
np.min(RTOSAANoise_list_CUATROl[i]),
np.min(RTOSAANoise_list_CUATROg[i]),
np.min(RTOSAANoise_list_Bayes[i])]) for i in range(n_noise)])
convergence_test = list(itertools.chain(*np.array(RTOSAANoise_list_SQSF) - min_list.reshape(6,1))) + \
list(itertools.chain(*np.array(RTOSAANoise_list_CUATROl) - min_list.reshape(6,1))) + \
list(itertools.chain(*np.array(RTOSAANoise_list_CUATROg) - min_list.reshape(6,1))) + \
list(itertools.chain(*np.array(RTOSAANoise_list_Bayes) - min_list.reshape(6,1)))
data_test = {'Best function evaluation': convergence_test, \
"Constraint violation": constraints, \
"Noise standard deviation": noise, \
'Method': method}
df_test = | pd.DataFrame(data_test) | pandas.DataFrame |
from collections import defaultdict, Counter
import scipy.sparse
import numpy as np
import pandas as pd
import os
from natsort import natsorted
from tqdm import tqdm_notebook as tqdn
import Levenshtein
import itertools
import ops.utils
from ops.constants import *
num_cores = 4
# LOAD TABLES
def validate_design(df_design):
if 0 in df_design['dialout'].values:
raise ValueError('dialout primers are one-indexed; value of 0 in "dialout" column is invalid.')
for group, df in df_design.groupby('group'):
x = df.drop_duplicates(['prefix_length', 'edit_distance'])
if len(x) > 1:
cols = ['group', 'gene_design', 'sgRNA_design',
'prefix_length', 'edit_distance']
error = 'multiple prefix specifications for group {0}:\n{1}'
raise ValueError(error.format(group, df[cols]))
return df_design
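def _demo_validate_design():
    # Illustrative sketch, not part of the original module; column values are made
    # up. A design table with one-indexed dialout primers and a single
    # (prefix_length, edit_distance) spec per group passes validation; a 0 in the
    # 'dialout' column would raise the ValueError above.
    df = pd.DataFrame({'group': ['pool1', 'pool1'],
                       'prefix_length': [12, 12],
                       'edit_distance': [2, 2],
                       'dialout': [1, 2]})
    return validate_design(df)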
def load_gene_list(filename,gene_id=GENE_ID,dtype=None):
return (pd.read_csv(filename, header=None, dtype=dtype)
.assign(design=os.path.splitext(filename)[0])
.rename(columns={0: gene_id})
)
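def _demo_load_gene_list():
    # Illustrative sketch, not part of the original module: write a tiny headerless
    # gene list and load it back. The resulting frame has the gene-id column named
    # by the GENE_ID constant plus a 'design' column taken from the file path with
    # its extension stripped. The gene ids below are made-up example values.
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as fh:
        fh.write('7157\n672\n')
        path = fh.name
    print(load_gene_list(path))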
def validate_genes(df_genes, df_sgRNAs,gene_id=GENE_ID):
assert all(x == x.upper() for x in df_sgRNAs[SGRNA])
missing = set(df_genes[gene_id]) - set(df_sgRNAs[gene_id])
if missing:
error = '{0} gene ids missing from sgRNA table: {1}'
missing_ids = ', '.join(map(str, missing))
raise ValueError(error.format(len(missing), missing_ids))
duplicates = df_genes[[SUBPOOL, gene_id]].duplicated(keep=False)
if duplicates.any():
error = 'duplicate genes for the same subpool: {0}'
xs = df_genes.loc[duplicates, [SUBPOOL, gene_id]].values
raise ValueError(error.format(xs))
return df_genes
def design_gene_symbol(df_design_gene_id,df_gene_symbol=pd.DataFrame()):
if df_gene_symbol.empty:
df_gene_symbol = df_design_gene_id
df_gene_symbol = (df_gene_symbol
.drop_duplicates('gene_id')
[['gene_id','gene_symbol']]
)
def parse_gene_id(design_gene_id):
return natsorted([int(id) for id in design_gene_id.split('&')])
df_design_gene_id['design_gene_symbol'] = (df_design_gene_id['design_gene_id']
.apply(lambda x: '&'.join(df_gene_symbol
.query('gene_id == {}'.format(str(parse_gene_id(x))))
['gene_symbol']
.tolist()
))
)
return df_design_gene_id
def multiple_targets(df_sgRNAs):
return df_sgRNAs.duplicated(subset=['sgRNA'],keep=False).astype('int')
# SELECT GUIDES
def select_prefix_group(df_genes, df_sgRNAs, extra_cols=None):
"""Selects sgRNAs within each prefix group.
`df_genes`: Genes requested for each group, one row per gene.
Group properties (prefix length, edit distance) are included as columns.
`df_sgRNAs`: sgRNAs available for each gene.
    `extra_cols`: Used to join genes and sgRNAs. The default join is by gene ID;
other columns can be included to restrict available sgRNAs for a
given gene.
"""
# doesn't shortcut if some genes need less guides
prefix_length, edit_distance = (
df_genes[[PREFIX_LENGTH, EDIT_DISTANCE]].values[0])
join_cols = [GENE_ID]
if extra_cols is not None:
join_cols += list(extra_cols)
# ops.df_sgRNAs = df_sgRNAs.copy()
# ops.df_genes = df_genes.copy()
# x = (df_sgRNAs
# .reset_index(drop=True)
# .join(df_genes.set_index(join_cols), on=join_cols, how='inner')
# .sort_values([SUBPOOL, GENE_ID, RANK]))
# assert False
return (df_sgRNAs
.join(df_genes.set_index(join_cols), on=join_cols, how='inner')
.sort_values([SUBPOOL, GENE_ID, RANK])
.pipe(select_guides, prefix_length, edit_distance)
.sort_values([SUBPOOL, GENE_ID, RANK])
.assign(selected_rank=lambda x:
ops.utils.rank_by_order(x, [SUBPOOL, GENE_ID]))
.query('selected_rank <= sgRNAs_per_gene')
.sort_values([SUBPOOL, GENE_ID, 'selected_rank'])
.drop(['selected_rank'], axis=1)
)
def select_guides(df_input, prefix_length, edit_distance, gene_id=GENE_ID, priority=[RANK], n_cores=-2):
"""`df_input` has gene_id, sgRNAs_per_gene
priority is for priority within a gene id
"""
if edit_distance == 1:
selected_guides = (df_input
.assign(prefix=lambda x: x['sgRNA'].str[:prefix_length])
.pipe(lambda x: x.join(x[GENE_ID].value_counts().rename('sgRNAs_per_id'),
on=GENE_ID))
.sort_values([RANK, 'sgRNAs_per_id'])
.drop_duplicates('prefix')
[SGRNA].pipe(list)
)
elif edit_distance == 2:
sequences = df_input['sgRNA']
group_ids = df_input['gene_id']
index = select_prefixes_edit_distance(sequences, group_ids,
prefix_length, edit_distance)
selected_guides = df_input.iloc[index][SGRNA].pipe(list)
else:
# TODO: prefix edit distance > 2
error = 'edit distance {} not implemented'.format(edit_distance)
raise NotImplementedError(error)
return df_input.query(loc('{SGRNA} == @selected_guides'))
def parallel_levenshtein_group(group, dist_func=None, n_cores=-2):
remainders = [group[i+1:] for i,_ in enumerate(group)]
if not dist_func:
dist_func = Levenshtein.distance
def measure_distances(string,remainder):
arr = []
for test_string in remainder:
d = dist_func(string,test_string)
if d<2:
print(string,test_string)
arr.append(d)
return arr
from joblib import Parallel, delayed
results = Parallel(n_cores)(delayed(measure_distances)(*subset)
for subset
in tqdn(zip(group,remainders),total=len(group)))
distances = []
for result in results:
distances.extend(result)
return distances
def add_barcodes(df_sgRNAs, df_barcodes):
d = {}
for L, df in df_barcodes.groupby('L'):
for col in df.filter(like='k_'):
k = int(col.split('_')[1])
barcodes = (df.query(col)['barcode']
.sample(frac=1, random_state=0))
d[(L, k)] = itertools.cycle(barcodes)
it = df_sgRNAs[['prefix_length', 'edit_distance']].values
barcodes = [next(d[(L, k)]) for L, k in it]
df_sgRNAs = df_sgRNAs.assign(barcode=barcodes)
assert (~df_sgRNAs
.duplicated(subset=['group', 'barcode']).any())
return df_sgRNAs
# FILTER SGRNAS
def filter_sgRNAs(df_sgRNAs, homopolymer=5):
cut = [has_homopolymer(x, homopolymer) or has_BsmBI_site(x)
for x in df_sgRNAs[SGRNA]]
return df_sgRNAs[~np.array(cut)]
def has_homopolymer(x, n):
a = 'A'*n in x
t = 'T'*n in x
g = 'G'*n in x
c = 'C'*n in x
return a | t | g | c
def has_BsmBI_site(x):
x = 'CACCG' + x.upper() + 'GTTT'
return 'CGTCTC' in x or 'GAGACG' in x
def has_BbsI_site(x):
x = 'CACCG' + x.upper() + 'GTTT'
return 'GAAGAC' in x or 'GTCTTC' in x
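def _demo_sgRNA_filters():
    # Illustrative sketch, not part of the original module; sequences are made up.
    # The restriction-site checks pad the spacer with the cloning flanks
    # ('CACCG' ... 'GTTT') before searching, so sites created at the junction are
    # also caught.
    print(has_homopolymer('ACGTAAAAAG', 5))       # True: run of five A's
    print(has_BsmBI_site('CGTCTCAAGGTCCATGCAT'))  # True: contains CGTCTC
    print(has_BbsI_site('ACGTACGTACGTACGTACGT'))  # False for this sequence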
# OLIGOS
def get_sgRNA_prefixes(df_oligos):
it = df_oligos[['sgRNA', 'prefix_length']].values
return [sgRNA[:prefix_length]
for sgRNA, prefix_length in it]
def build_sgRNA_oligos(df, dialout_primers,
left='CGTCTCg{u6}', right='GTTTcGAGACG',
u6='east'):
if '{u6}' in left:
if u6 == 'east':
u6_3prime = 'CACCg'
elif u6 == 'west':
u6_3prime = 'GTTG'
elif u6 == 'west_v2':
u6_3prime = 'caccTTGTTG'
else:
raise ValueError(u6)
left = left.format(u6=u6_3prime)
template = '{fwd}{left}{sgRNA}{right}{rev}'
arr = []
for s, d in df[[SGRNA, DIALOUT]].values:
# one-indexed
fwd, rev = dialout_primers[d - 1]
rev = reverse_complement(rev)
oligo = template.format(fwd=fwd, rev=rev, sgRNA=s,
left=left, right=right)
arr += [oligo]
return arr
def build_two_step_oligos(df, dialout_primers, order, u6='east'):
"""Default order is for lentiGuide-BC.
"""
if u6 == 'west':
u6_3prime = 'GTTG'
elif u6 == 'east':
u6_3prime = 'CACCg'
if order == 'lentiGuide-BC':
left='CGTCTCc{u6}'.format(u6=u6_3prime)
middle='gtttNNgtcttcNNNNNNgaagacNNttcc'
right='actgCgagacg'
template = '{fwd}{left}{sgRNA}{middle}{barcode}{right}{rev}'
elif order == 'barcode-guide':
left = 'CGTCTCcTTCC'
right = 'gtttCgagacg'
middle = 'actgNNgtcttcNNNNNNgaagacNN{u6}'.format(u6=u6_3prime)
template = '{fwd}{left}{barcode}{middle}{sgRNA}{right}{rev}'
else:
raise ValueError('order not recognized')
arr = []
for sgRNA, barcode, dialout in df[[SGRNA, BARCODE, DIALOUT]].values:
# one-indexed
fwd, rev = dialout_primers[dialout - 1]
rev = reverse_complement(rev)
oligo = template.format(fwd=fwd.lower(), rev=rev, sgRNA=sgRNA,
barcode=barcode, left=left, middle=middle, right=right)
arr += [oligo]
return arr
def build_test(df_oligos, dialout_primers):
"""Pattern-match sgRNA cloning and dialout primers.
"""
sites = 'CGTCTC', reverse_complement('CGTCTC')
pat = ('(?P<dialout_fwd>.*){fwd}.CACCG'
'(?P<sgRNA_cloned>.*)'
'GTTT.{rev}(?P<dialout_rev>.*)')
pat = pat.format(fwd=sites[0], rev=sites[1])
kosuri = {}
for i, (fwd, rev) in enumerate(dialout_primers,start=1):
kosuri[fwd] = 'fwd_{0}'.format(i)
kosuri[rev] = 'rev_{0}'.format(i)
def validate_design(df):
if not (df[VECTOR] == 'CROPseq').all():
raise ValueError('can only validate CROPseq design')
return df
return (df_oligos
.pipe(validate_design)
.assign(sgRNA=lambda x: x['sgRNA'].str.upper())
.assign(oligo=lambda x: x['oligo'].str.upper())
.pipe(lambda x: pd.concat([x, x['oligo'].str.extract(pat)], axis=1))
.assign(dialout_rev=lambda x: x['dialout_rev'].apply(reverse_complement))
.assign(dialout_fwd_ix=lambda x: x['dialout_fwd'].apply(kosuri.get))
.assign(dialout_rev_ix=lambda x: x['dialout_rev'].apply(kosuri.get))
.assign(dialout_ix=lambda x:
x['dialout_fwd_ix'].str.split('_').str[1].astype(int))
)
def validate_test(df_test):
"""Check sgRNA cloning and identiy of dialout primers.
"""
assert df_test.eval('sgRNA_cloned == sgRNA').all()
assert (df_test['dialout_fwd_ix'].str[-1] ==
df_test['dialout_rev_ix'].str[-1]).all()
assert df_test.eval('dialout_ix== dialout').all()
print('Looking good!')
return df_test
def reverse_complement(seq):
watson_crick = {'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C',
'U': 'A',
'N': 'N'}
watson_crick.update({k.lower(): v.lower()
for k, v in watson_crick.items()})
return ''.join(watson_crick[x] for x in seq)[::-1]
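def _demo_reverse_complement():
    # Illustrative check, not part of the original module: reverse complement of a
    # short mixed-case sequence; case is preserved per base.
    print(reverse_complement('ACGTac'))  # expected output: gtACGT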
# CODES
def distance_prefix(a, b):
"""Hack to get edit distance of string prefixes. Only works
for single insertion/deletion/substitution. Should be equivalent
to Levenshtein distance, ignoring the n + 1 position.
"""
compare = [
# substitution
(a[:-1], b[:-1]),
# deletion
(a[:-1], b),
(a, b[:-1]),
# insertion
(a[:-1], b[:-1] + a[-1]),
(b[:-1], a[:-1] + b[-1]),
]
return min(Levenshtein.distance(x1, x2) for x1, x2 in compare)
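def _demo_distance_prefix():
    # Illustrative sketch, not part of the original module: for two prefixes
    # related by a single deletion plus the shifted-in next base, plain Levenshtein
    # distance reports 2 while distance_prefix counts one edit.
    a, b = 'ACGTA', 'CGTAC'
    print(Levenshtein.distance(a, b), distance_prefix(a, b))  # 2 1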
def khash(s, k):
"""Divide a string into substrings suitable for checking edit distance of
`k`. Two strings of the same length with Levenshtein edit distance less
than `k` will share at least one substring.
"""
n = len(s)
window = int(np.ceil((n - k) / float(k)))
s = s + s
arr = []
for i in range(n):
# arr += [s[i:i+window]]
for j in (0, 1):
arr += [((i + j) % n, s[i:i+window])]
return arr
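def _demo_khash():
    # Illustrative sketch, not part of the original module: khash of a made-up
    # 6-mer with k=2 yields (position, substring) keys; two equal-length strings
    # within edit distance less than k share at least one such key.
    for key in khash('ACGTAC', 2):
        print(key)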
def build_khash(xs, k, return_dict=False):
D = defaultdict(list)
for x in xs:
for h in khash(x, k):
D[h].append(x)
D = {k: sorted(set(v)) for k,v in D.items()}
if return_dict:
return D
else:
hash_buckets = list(D.values())
return hash_buckets
def sparse_dist(hash_buckets, threshold, distance=None, progress=None):
"""Entries less than threshold only.
"""
if distance is None:
distance = Levenshtein.distance
if progress is None:
progress = lambda x: x
D = {}
for xs in progress(hash_buckets):
for i, a in enumerate(xs):
for b in xs[i+1:]:
d = distance(a,b)
if d < threshold:
key = tuple(sorted((a,b)))
D[key] = d
return D
def sparse_view(xs, D, symmetric=True):
"""string barcodes
"""
assert len(xs) == len(set(xs))
mapper = {x: i for i, x in enumerate(xs)}
f = lambda x: mapper[x]
if len(D) == 0:
i, j, data = [], [], []
else:
i, j, data = zip(*[(f(a), f(b), v) for (a, b), v in D.items()])
# sparse matrix uses zero for missing values
data = np.array(data) >= 0
i = np.array(i)
j = np.array(j)
n = len(xs)
cm = scipy.sparse.coo_matrix((data, (i, j)), shape=(n, n))
if symmetric:
cm = (cm + cm.T).tocsr()
return cm
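def _demo_conflict_matrix():
    # Illustrative sketch, not part of the original module; barcodes are made up.
    # Chains build_khash -> sparse_dist -> sparse_view: only pairs closer than the
    # threshold appear in D, and sparse_view turns them into a symmetric sparse
    # conflict matrix.
    barcodes = ['AAAA', 'AAAT', 'CCGG', 'CCGA']
    buckets = build_khash(barcodes, k=2)
    D = sparse_dist(buckets, threshold=2)
    cm = sparse_view(barcodes, D)
    print(D)             # expect the two within-pair distances of 1
    print(cm.toarray())  # nonzero entries mark conflicting pairs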
def maxy_clique_groups(cm, group_ids, verbose=False):
"""Prioritizes groups with the fewest selected barcodes.
Prioritizing groups with the fewest remaining barcodes could give
better results.
"""
# counts => group_id
d1 = defaultdict(set)
for id_, counts in Counter(group_ids).items():
d1[counts] |= {id_}
# group_id => indices
d2 = defaultdict(list)
for i, id_ in enumerate(group_ids):
d2[id_] += [i]
# .pop() takes from the end of the list
d2 = {k: v[::-1] for k,v in d2.items()}
# group_id => # selected
d3 = Counter()
selected = []
available = np.array(range(len(group_ids)))
while d1:
if verbose and (len(selected) % 1000) == 0:
print(len(selected))
# assert cm[selected, :][:, selected].sum() == 0
# pick a group_id from the lowest bin
count = min(d1.keys())
id_ = d1[count].pop()
# remove bin if empty
if len(d1[count]) == 0:
d1.pop(count)
# discard indices until we find a new one
index = None
while d2[id_]:
index = d2[id_].pop()
# approach 1: check for conflict every time
# cm[index, selected].sum() == 0
# approach 2: keep an array of available indices
if index in available:
break
else:
index = None
# keep index
if index is not None:
selected.append(index)
d3[id_] += 1
available = available[available != index]
# get rid of incompatible barcodes
remove = cm[index, available].indices
mask = np.ones(len(available), dtype=bool)
mask[remove] = False
available = available[mask]
# move group_id to another bin
n = len(d2[id_])
if n > 0:
d1[n] |= {id_}
return selected
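def _demo_maxy_clique_groups():
    # Illustrative sketch, not part of the original module; inputs are made up.
    # Greedy selection on the toy conflict matrix from the previous sketch: at most
    # one barcode of each conflicting pair is kept, and picks are spread across the
    # two group ids.
    barcodes = ['AAAA', 'AAAT', 'CCGG', 'CCGA']
    cm = sparse_view(barcodes, sparse_dist(build_khash(barcodes, 2), threshold=2))
    selected = maxy_clique_groups(cm, ['geneA', 'geneA', 'geneB', 'geneB'])
    print([barcodes[i] for i in selected])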
def sparse_dist_parallel(hash_buckets, threshold, distance=None):
from multiprocessing import Pool
n = num_cores * 10
ix = np.floor(np.linspace(0, len(hash_buckets), n)).astype(int)
arr = []
for i, j in zip(ix, ix[1:]):
arr += [(hash_buckets[i:j], threshold, distance)]
with Pool(num_cores) as p:
results = p.starmap(sparse_dist, arr)
D = {}
for d in results:
D.update(d)
return D
def select_prefixes_edit_distance(sequences, group_ids, prefix_length,
min_distance):
if min_distance != 2:
msg = 'prefix distance only correct for single edits'
raise NotImplementedError(msg)
# remove duplicate prefixes immediately
prefix_series = (pd.Series(list(sequences))
.str[:prefix_length + 1]
.drop_duplicates())
index_map = np.array(prefix_series.index)
prefixes = list(prefix_series)
group_ids = np.array(group_ids)[index_map]
print(len(sequences))
if len(sequences) > 80000:
work_id = hash(str(sequences) + str(prefix_length) + str(min_distance))
print(prefix_length, min_distance, work_id)
f = 'design/pool2/output_L{}_D_{}.pkl'.format(prefix_length, work_id)
if os.path.exists(f):
print('loading distance dictionary', f)
with open(f, 'rb') as fh:
import pickle
D = pickle.load(fh)
else:
hash_buckets = build_khash(prefixes, min_distance)
print('hashed {} prefixes into {} buckets'
.format(len(prefixes), len(hash_buckets)))
# save work
ops.hash_buckets = (work_id, hash_buckets)
assert False
D = sparse_dist(hash_buckets, threshold=min_distance,
distance=distance_prefix)
else:
hash_buckets = build_khash(prefixes, min_distance)
print('hashed {} prefixes into {} buckets'
.format(len(prefixes), len(hash_buckets)))
D = sparse_dist_parallel(hash_buckets, threshold=min_distance,
distance=distance_prefix)
cm = sparse_view(prefixes, D)
print('built approximate distance matrix:', cm.shape)
index = maxy_clique_groups(cm, group_ids, verbose=True)
print('selected sgRNAs:', len(index_map[index]))
return list(index_map[index])
# EXTERNAL
def import_brunello(filename):
"""Import "Brunello Library Target Genes", which can be found at:
https://www.addgene.org/pooled-library/broadgpp-human-knockout-brunello/
"""
columns = {'Target Gene ID': GENE_ID
,'Target Gene Symbol': GENE_SYMBOL
,'sgRNA Target Sequence': SGRNA
, 'Rule Set 2 score': SGRNA_SCORE
}
def reassign_nontargeting(df):
"""Given non-targeting sgRNAs a gene ID of -1.
"""
new_ids = []
new_symbols = []
for i, s in df[[GENE_ID, GENE_SYMBOL]].values:
if s == 'Non-Targeting Control':
new_ids.append(-1)
new_symbols.append('nontargeting')
else:
new_ids.append(i)
new_symbols.append(s)
return df.assign(**{GENE_ID: new_ids, GENE_SYMBOL: new_symbols})
df_brunello = (pd.read_csv(filename, sep='\t')
.rename(columns=columns)
.pipe(reassign_nontargeting)
.pipe(ops.utils.cast_cols, int_cols=[GENE_ID])
.assign(**{SGRNA_SCORE: lambda x: x[SGRNA_SCORE].fillna(0)})
.assign(**{RANK: lambda x:
x.groupby(GENE_ID)[SGRNA_SCORE]
.rank(ascending=False, method='first').astype(int)})
[[GENE_ID, GENE_SYMBOL, RANK, SGRNA]]
.sort_values([GENE_ID, RANK])
)
df_brunello.loc[df_brunello.gene_symbol=="AKAP2",'gene_id']=445815
df_brunello.loc[df_brunello.gene_symbol=="PALM2",'gene_id']=445815
df_brunello.loc[df_brunello.gene_symbol=="C10orf12",'gene_id']=84458
df_brunello.loc[df_brunello.gene_symbol=="C10orf131",'gene_id']=387707
df_brunello.loc[df_brunello.gene_symbol=="C16orf47",'gene_id']=463
df_brunello.loc[df_brunello.gene_symbol=="C17orf47",'gene_id']=5414
df_brunello.loc[df_brunello.gene_symbol=="C7orf76",'gene_id']=7979
df_brunello.loc[df_brunello.gene_symbol=="MIA2",'gene_id']=4253
df_brunello.loc[df_brunello.gene_symbol=="NARR",'gene_id']=83871
df_brunello.loc[df_brunello.gene_symbol=="TMEM133",'gene_id']=143872
df_brunello.loc[df_brunello.gene_symbol=="XAGE1B",'gene_id']=653067
df_brunello = df_brunello.query('gene_symbol != ["C2orf48","TMEM257","TXNRD3NB"]').copy()
return df_brunello
def import_brunello_dump(filename):
df_brunello_dump = pd.read_csv(filename)
df_brunello_dump.loc[df_brunello_dump.gene_symbol=="AKAP2",'gene_id']=445815
df_brunello_dump.loc[df_brunello_dump.gene_symbol=="PALM2",'gene_id']=445815
df_brunello_dump.loc[df_brunello_dump.gene_symbol=="C16orf47",'gene_id']=463
df_brunello_dump.loc[df_brunello_dump.gene_symbol=="C17orf47",'gene_id']=5414
df_brunello_dump = df_brunello_dump.query('gene_symbol != ["C2orf48","TMEM257","TXNRD3NB"]').copy()
return df_brunello_dump
def import_tkov3(filename, df_ncbi):
columns = {'GENE': GENE_SYMBOL, 'SEQUENCE': SGRNA}
symbols_to_ids = df_ncbi.set_index(GENE_SYMBOL)[GENE_ID]
# symbols_to_ids.index = symbols_to_ids.index.str.upper()
df_tkov3 = (pd.read_excel(filename)
.rename(columns=columns)
[[GENE_SYMBOL, SGRNA]]
)
df_tkov3 = df_tkov3.query('gene_symbol!=["C2orf48","TMEM257","TXNRD3NB"]').copy()
df_tkov3.loc[df_tkov3.gene_symbol=="ADC",'gene_symbol']="AZIN2"
df_tkov3.loc[df_tkov3.gene_symbol=="AGPAT9",'gene_symbol']="GPAT3"
df_tkov3.loc[df_tkov3.gene_symbol=="AIM1",'gene_symbol']="CRYBG1"
df_tkov3.loc[df_tkov3.gene_symbol=="B3GNT1",'gene_symbol']="B4GAT1"
df_tkov3.loc[df_tkov3.gene_symbol=="C11orf48",'gene_symbol']="LBHD1"
df_tkov3.loc[df_tkov3.gene_symbol=="C15orf38",'gene_symbol']="ARPIN"
df_tkov3.loc[df_tkov3.gene_symbol=="C2ORF15",'gene_symbol']="C2orf15"
df_tkov3.loc[df_tkov3.gene_symbol=="C2orf47",'gene_symbol']="MAIP1"
df_tkov3.loc[df_tkov3.gene_symbol=="C6ORF165",'gene_symbol']="C6orf165"
df_tkov3.loc[df_tkov3.gene_symbol=="C7orf55",'gene_symbol']="FMC1"
df_tkov3.loc[df_tkov3.gene_symbol=="CD97",'gene_symbol']="ADGRE5"
df_tkov3.loc[df_tkov3.gene_symbol=="CXXC11",'gene_symbol']="RTP5"
df_tkov3.loc[df_tkov3.gene_symbol=="FLJ27365",'gene_symbol']="MIRLET7BHG"
df_tkov3.loc[df_tkov3.gene_symbol=="GIF",'gene_symbol']="CBLIF"
df_tkov3.loc[df_tkov3.gene_symbol=="HN1L",'gene_symbol']="JPT2"
df_tkov3.loc[df_tkov3.gene_symbol=="HN1",'gene_symbol']="JPT1"
df_tkov3.loc[df_tkov3.gene_symbol=="KIAA1045",'gene_symbol']="PHF24"
df_tkov3.loc[df_tkov3.gene_symbol=="NAT6",'gene_symbol']="NAA80"
df_tkov3.loc[df_tkov3.gene_symbol=="NOV",'gene_symbol']="CCN3"
df_tkov3.loc[df_tkov3.gene_symbol=="STRA13",'gene_symbol']="CENPX"
df_tkov3.loc[df_tkov3.gene_symbol=="ZHX1-C8ORF76",'gene_symbol']="ZHX1-C8orf76"
df_tkov3.loc[df_tkov3.gene_symbol=="MUM1",'gene_symbol']="PWWP3A"
df_tkov3.loc[df_tkov3.gene_symbol=='CSRP2BP','gene_symbol'] = 'KAT14'
df_tkov3.loc[df_tkov3.gene_symbol=='C10orf2','gene_symbol'] = 'TWNK'
df_tkov3.loc[df_tkov3.gene_symbol=='AZI1','gene_symbol'] = 'CEP131'
df_tkov3.loc[df_tkov3.gene_symbol=='EFTUD1','gene_symbol'] = 'EFL1'
# df_tkov3[GENE_SYMBOL]=df_tkov3[GENE_SYMBOL].str.upper()
return (df_tkov3
.join(symbols_to_ids, on=GENE_SYMBOL, how='left')
.assign(**{RANK: lambda x: ops.utils.rank_by_order(x, GENE_ID)})
)
def import_CRISPRfocus(filename,df_ncbi):
df_CRISPRfocus = pd.read_csv(filename)
df_CRISPRfocus.loc[df_CRISPRfocus.gene_symbol=="ZHX1-C8ORF76",'gene_symbol']="ZHX1-C8orf76"
df_CRISPRfocus.loc[df_CRISPRfocus.gene_symbol=="TGIF2-C20ORF24",'gene_symbol']="TGIF2-C20orf24"
df_CRISPRfocus.loc[df_CRISPRfocus.gene_symbol=="RPL17-C18ORF32",'gene_symbol']="RPL17-C18orf32"
df_CRISPRfocus.loc[df_CRISPRfocus.gene_symbol=="ANXA8L1",'gene_id']=np.nan
df_CRISPRfocus.loc[df_CRISPRfocus.gene_symbol=="MUM1",'gene_symbol']="PWWP3A"
df_CRISPRfocus.loc[df_CRISPRfocus.gene_symbol=="NAT6",'gene_symbol']="NAA80"
df_CRISPRfocus.loc[df_CRISPRfocus.gene_symbol=="SLC35E2",'gene_symbol']="SLC35E2A"
df_CRISPRfocus.loc[df_CRISPRfocus.gene_symbol=="GIF",'gene_symbol']="CBLIF"
df_CRISPRfocus.loc[df_CRISPRfocus.gene_symbol=="NOV",'gene_symbol']="CCN3"
# df_CRISPRfocus_id = df_CRISPRfocus.dropna(subset=['gene_id'])
# df_CRISPRfocus_na = df_CRISPRfocus[df_CRISPRfocus.gene_id.isna()]
symbols_to_ids = df_ncbi.set_index(GENE_SYMBOL)[GENE_ID]
# symbols_to_ids.index = symbols_to_ids.index.str.upper()
# df_CRISPRfocus_na = (df_CRISPRfocus_na
# .drop(columns=['gene_id'])
# .join(symbols_to_ids, on=GENE_SYMBOL, how='left')
# )
df_CRISPRfocus = (df_CRISPRfocus
.drop(columns=['gene_id'])
.join(symbols_to_ids, on=GENE_SYMBOL, how='left')
)
df_CRISPRfocus= df_CRISPRfocus.query('gene_symbol != ["FAM231C","C2orf48","TMEM257","TXNRD3NB","FAM25B"]').copy()
# return pd.concat([df_CRISPRfocus_id,df_CRISPRfocus_na],sort=True)[['gene_symbol','gene_id','sgRNA','rank']]
return df_CRISPRfocus[['gene_symbol','gene_id','sgRNA','rank']]
def import_wang2017(filename,df_ncbi):
df_wang2017 = (pd.read_excel(filename)
.rename(columns={'sgRNA ID':'sgRNA_ID','sgRNA location':'sgRNA_location',
'Genomic strand targeted':'Genomic_strand','sgRNA sequence':'sgRNA',
'Other genes hits':'Other_gene_hits','Symbol':'gene_symbol'})
)
def group_controls(s):
if s.sgRNA_ID.startswith('CTRL'):
s.gene_symbol = 'nontargeting'
elif 'INTERGENIC' in s.sgRNA_ID:
s.gene_symbol = 'INTERGENIC'
return s
df_wang2017 = df_wang2017.apply(lambda x: group_controls(x),axis=1)
df_wang2017 = df_wang2017.query('gene_symbol != "INTERGENIC"').copy()
df_wang2017.loc[df_wang2017.gene_symbol=="NOV",'gene_symbol']="CCN3"
df_wang2017.loc[df_wang2017.gene_symbol=="GIF",'gene_symbol']="CBLIF"
df_wang2017.loc[df_wang2017.gene_symbol=="B3GNT1",'gene_symbol']="B4GAT1"
df_wang2017.loc[df_wang2017.gene_symbol=="C7orf55",'gene_symbol']="FMC1"
df_wang2017.loc[df_wang2017.gene_symbol=="CXXC11",'gene_symbol']="RTP5"
df_wang2017.loc[df_wang2017.gene_symbol=="AGPAT9",'gene_symbol']="GPAT3"
df_wang2017.loc[df_wang2017.gene_symbol=="ZHX1-C8ORF76",'gene_symbol']="ZHX1-C8orf76"
df_wang2017.loc[df_wang2017.gene_symbol=="AIM1",'gene_symbol']="CRYBG1"
df_wang2017.loc[df_wang2017.gene_symbol=="NAT6",'gene_symbol']="NAA80"
df_wang2017.loc[df_wang2017.gene_symbol=="CD97",'gene_symbol']="ADGRE5"
df_wang2017.loc[df_wang2017.gene_symbol=="C15orf38",'gene_symbol']="ARPIN"
df_wang2017.loc[df_wang2017.gene_symbol=="C2orf47",'gene_symbol']="MAIP1"
df_wang2017.loc[df_wang2017.gene_symbol=="STRA13",'gene_symbol']="CENPX"
df_wang2017.loc[df_wang2017.gene_symbol=="C11orf48",'gene_symbol']="LBHD1"
df_wang2017.loc[df_wang2017.gene_symbol=="MUM1",'gene_symbol']="PWWP3A"
df_wang2017.loc[df_wang2017.gene_symbol=="HN1L",'gene_symbol']="JPT2"
df_wang2017.loc[df_wang2017.gene_symbol=="HN1",'gene_symbol']="JPT1"
df_wang2017.loc[df_wang2017.gene_symbol=="ADC",'gene_symbol']="AZIN2"
df_wang2017.loc[df_wang2017.gene_symbol=="TRIM49D2P",'gene_symbol']="TRIM49D2"
df_wang2017.loc[df_wang2017.gene_symbol=="FAM21A",'gene_symbol']="WASHC2A"
df_wang2017.loc[df_wang2017.gene_symbol=="SLC35E2",'gene_symbol']="SLC35E2A"
df_wang2017.loc[df_wang2017.gene_symbol=="APITD1",'gene_symbol']="CENPS"
df_wang2017.loc[df_wang2017.gene_symbol=="LIMS3L",'gene_symbol']="LIMS4"
df_wang2017.loc[df_wang2017.gene_symbol=='CSRP2BP','gene_symbol'] = 'KAT14'
df_wang2017.loc[df_wang2017.gene_symbol=='AZI1','gene_symbol'] = 'CEP131'
df_wang2017.loc[df_wang2017.gene_symbol=='TCEB3C','gene_symbol'] = 'ELOA3'
df_wang2017.loc[df_wang2017.gene_symbol=='TCEB3CL','gene_symbol'] = 'ELOA3B'
df_wang2017.loc[df_wang2017.gene_symbol=='EFTUD1','gene_symbol'] = 'EFL1'
df_wang2017.loc[df_wang2017.gene_symbol=='CGB','gene_symbol'] = 'CGB3'
df_wang2017.loc[df_wang2017.gene_symbol=='C10orf2','gene_symbol'] = 'TWNK'
df_wang2017 = df_wang2017.query(('gene_symbol != '
'["CT45A4","SMCR9","PRAMEF3",'
'"SPANXE","PRAMEF16","C2orf48",'
'"TMEM257","TXNRD3NB","FOXD4L2","FAM25B"]'
)
).copy()
symbols_to_ids = df_ncbi.set_index('gene_symbol')['gene_id']
# symbols_to_ids.index = symbols_to_ids.index.str.upper()
# df_wang2017['gene_symbol']=df_wang2017['gene_symbol'].str.upper()
df_wang2017 = (df_wang2017
.join(symbols_to_ids, on=['gene_symbol'], how='left')
.assign(**{RANK: lambda x: ops.utils.rank_by_order(x, 'gene_symbol')})
)
def LOC_to_ID(s):
if s.gene_symbol.startswith('LOC') & np.isnan(s.gene_id):
s.gene_id = s.gene_symbol[3:]
return s
df_wang2017 = df_wang2017.apply(lambda x: LOC_to_ID(x),axis=1)
df_wang2017.loc[df_wang2017.gene_symbol=="CRIPAK",'gene_id'] = 285464
df_wang2017.loc[df_wang2017.gene_symbol=="FAM231A",'gene_id'] = 729574
df_wang2017.loc[df_wang2017.gene_symbol=="KIAA1804",'gene_id'] = 84451
df_wang2017.loc[df_wang2017.gene_symbol=="KIAA1045",'gene_id'] = 23349
df_wang2017.loc[df_wang2017.gene_symbol == "nontargeting",'gene_id']=-1
return df_wang2017[['gene_symbol','gene_id','sgRNA','rank']]
def import_GPP_designer_results(filename):
df_gpp = | pd.read_csv(filename,sep='\t') | pandas.read_csv |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
            self.fileLocation = os.path.abspath(inputFileName)
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # e.g. 2020-06-09 19:14:00.000
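    @staticmethod
    def _demoDateParser():
        # Illustrative sketch, not part of the original class: parse one made-up
        # timestamp with the default format string.
        parser = TransformMetaData.getDateParser()
        return parser("2020-06-09 19:14:00.000")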
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
            return (None, None, None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
        analysisFrame = analysisFrame.apply(pandas.to_numeric, errors='coerce')  # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
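    @staticmethod
    def _demoCalculateMean():
        # Illustrative sketch, not part of the original class: the equal-weight
        # average used above matches numpy.mean for ordinary inputs. Note that
        # numpy.float128 is unavailable on some platforms (e.g. native Windows).
        data = numpy.array([1.0, 2.0, 3.0, 4.0])
        return (TransformMetaData._calculateMean(data), numpy.mean(data))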
def _calculateStd(self, data):
"""
        Calculates the standard deviation in a multiplication method since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0.0)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
        Cleans the data frame of rows whose values are invalid, e.g. inf or NaN.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": pandas.StringDtype(),
"bitErrBucketArray44": pandas.StringDtype(),
"bitErrBucketArray45": pandas.StringDtype(),
"bitErrBucketArray46": pandas.StringDtype(),
"bitErrBucketArray47": pandas.StringDtype(),
"bitErrBucketArray48": pandas.StringDtype(),
"bitErrBucketArray49": pandas.StringDtype(),
"bitErrBucketArray50": pandas.StringDtype(),
"bitErrBucketArray51": pandas.StringDtype(),
"bitErrBucketArray52": pandas.StringDtype(),
"bitErrBucketArray53": pandas.StringDtype(),
"bitErrBucketArray54": pandas.StringDtype(),
"bitErrBucketArray55": pandas.StringDtype(),
"bitErrBucketArray56": pandas.StringDtype(),
"bitErrBucketArray57": pandas.StringDtype(),
"bitErrBucketArray58": pandas.StringDtype(),
"bitErrBucketArray59": pandas.StringDtype(),
"bitErrBucketArray60": pandas.StringDtype(),
"bitErrBucketArray61": pandas.StringDtype(),
"bitErrBucketArray62": pandas.StringDtype(),
"bitErrBucketArray63": pandas.StringDtype(),
"bitErrBucketArray64": pandas.StringDtype(),
"bitErrBucketArray65": pandas.StringDtype(),
"bitErrBucketArray66": pandas.StringDtype(),
"bitErrBucketArray67": pandas.StringDtype(),
"bitErrBucketArray68": pandas.StringDtype(),
"bitErrBucketArray69": pandas.StringDtype(),
"bitErrBucketArray70": pandas.StringDtype(),
"bitErrBucketArray71": pandas.StringDtype(),
"bitErrBucketArray72": pandas.StringDtype(),
"bitErrBucketArray73": pandas.StringDtype(),
"bitErrBucketArray74": pandas.StringDtype(),
"bitErrBucketArray75": pandas.StringDtype(),
"bitErrBucketArray76": pandas.StringDtype(),
"bitErrBucketArray77": pandas.StringDtype(),
"bitErrBucketArray78": pandas.StringDtype(),
"bitErrBucketArray79": pandas.StringDtype(),
"bitErrBucketArray80": pandas.StringDtype(),
"mrr_successDistribution1": pandas.StringDtype(),
"mrr_successDistribution2": pandas.StringDtype(),
"mrr_successDistribution3": pandas.StringDtype(),
"mrr_successDistribution4": pandas.StringDtype(),
"mrr_successDistribution5": pandas.StringDtype(),
"mrr_successDistribution6": pandas.StringDtype(),
"mrr_successDistribution7": pandas.StringDtype(),
"mrr_successDistribution8": pandas.StringDtype(),
"mrr_successDistribution9": pandas.StringDtype(),
"mrr_successDistribution10": pandas.StringDtype(),
"mrr_successDistribution11": pandas.StringDtype(),
"mrr_successDistribution12": pandas.StringDtype(),
"mrr_successDistribution13": pandas.StringDtype(),
"mrr_successDistribution14": pandas.StringDtype(),
"mrr_successDistribution15": pandas.StringDtype(),
"mrr_successDistribution16": pandas.StringDtype(),
"mrr_successDistribution17": pandas.StringDtype(),
"mrr_successDistribution18": | pandas.StringDtype() | pandas.StringDtype |
import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
import plotly as plt
import folium
from streamlit_folium import folium_static
from folium.plugins import MarkerCluster, marker_cluster
st.set_page_config(layout='wide')
@st.cache(allow_output_mutation=True)
def get_data(path):
data = pd.read_csv(path)
return data
def date_format (df):
df['date'] = pd.to_datetime(df['date'])
return df
def drop_features (df):
df = df.drop(['sqft_living15', 'sqft_lot15'], axis=1)
return df
def delete_duplicates(df):
df = df.drop_duplicates(subset = ['id'], keep = 'last')
return df
def transform_feature(df):
for i in range (len(df)):
if (df.loc[i, 'waterfront'] == 0):
df.loc[i, 'waterfront'] = 'no'
elif (df.loc[i, 'waterfront'] == 1):
df.loc[i, 'waterfront'] = 'yes'
return df
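# A vectorized alternative to the row loop above (a sketch only; this hypothetical helper
# is not called by the app): Series.map does the 0/1 -> 'no'/'yes' recode in one pass.
def transform_feature_vectorized(df):
    df = df.copy()
    df['waterfront'] = df['waterfront'].map({0: 'no', 1: 'yes'})
    return df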
def new_features(df):
df['age_build'] = df['yr_built'].apply(lambda x: 'build <1955' if x < 1955 else 'build > 1955')
df['basement'] = df['sqft_basement'].apply(lambda x: 'basement' if x > 0 else 'no_basement')
df['renovation'] = df['yr_renovated'].apply(lambda x: 'renovation' if x > 0 else 'no_renovation')
df['condition_type'] = df['condition'].apply(lambda x: 'bad' if x <=2 else
'regular' if x == 3 or x == 4 else
'good')
df['year']= df['date'].dt.year
df['month'] = df['date'].dt.month
df['season'] = df['month'].apply(lambda x: 'summer' if (x > 6) & (x <= 9) else
'spring' if (x > 3) & (x <= 6) else
'fall' if (x > 9) & (x <= 12) else
'winter')
return df
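# A minimal sketch of the assumed call order for the cleaning/feature functions above
# (hypothetical helper; the real entry point lives elsewhere in the app). Note that
# transform_feature indexes rows by position, so the index is reset after dropping duplicates.
def preprocess(path):
    df = get_data(path)
    df = date_format(df)
    df = drop_features(df)
    df = delete_duplicates(df).reset_index(drop=True)
    df = transform_feature(df)
    df = new_features(df)
    return df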
def data_overview(df):
st.title('House Rocket Project')
st.sidebar.title('House Rocket Project')
    st.sidebar.write('''House Rocket is a platform for buying and selling properties. Its business model is to buy properties
                    and then sell them at a higher price.
                    The goal is to maximize the company's profit by finding good deals.''')
st.sidebar.write("Here you can find more information on this project: "
"[GitHub](https://github.com/cmfonseca/project-house-rocket)")
if st.checkbox('Show raw dataset'):
st.header('Data Overview')
f_attributes = st.sidebar.multiselect('Enter columns', df.columns)
f_zipcode = st.sidebar.multiselect('Enter zipcode', df['zipcode'].unique())
if f_zipcode != [] and f_attributes != []:
df = df.loc[df['zipcode'].isin(f_zipcode), f_attributes]
elif f_zipcode != [] and f_attributes == []:
df = df.loc[df['zipcode'].isin(f_zipcode), :]
elif f_zipcode == [] and f_attributes != []:
df = df.loc[:, f_attributes]
else:
df = df.copy()
st.dataframe(df)
return None
def descript_stat (df):
st.header('Descriptive Statistics')
num_attributes = df.select_dtypes(include=['int64', 'float64'])
num_attributes = num_attributes.iloc[:, 1:]
#Estimates of location
avg = pd.DataFrame(num_attributes.apply(np.mean))
median = pd.DataFrame(num_attributes.apply(np.median))
#Estimates of variability
std = pd.DataFrame(num_attributes.apply(np.std))
max_ = pd.DataFrame(num_attributes.apply(np.max))
min_ = pd.DataFrame(num_attributes.apply(np.min))
#Concat
d_st = pd.concat([avg, median, std, min_, max_], axis=1).reset_index()
#Changing column names
d_st.columns = ['features', 'average', 'median', 'std', 'min', 'max']
st.dataframe(d_st, width = 900)
return df
def data_analysis(df):
st.header('Data Insights')
#######
#H01
#######
c1, c2 = st.beta_columns(2)
c1.subheader('H01: Properties that are waterfront are, in average, 30% more expensive.')
water_price = df[['price', 'waterfront']].groupby('waterfront').mean().reset_index()
fig = px.bar(water_price, x="waterfront", y="price", color = 'waterfront', template= 'seaborn',
labels={'waterfront':'Waterfront', 'price': 'Property Price'})
fig.update_layout(showlegend=False)
c1.plotly_chart(fig, use_container_width=True)
#######
#H02
#######
c2.subheader('H02: Properties built before 1955 are, in average, 50% less expensive.')
build_price = df[['price', 'age_build']].groupby('age_build').mean().reset_index()
fig = px.bar(build_price, x="age_build", y="price", color='age_build', template= 'seaborn',
labels={'age_build':'Age of the Building', 'price': 'Property Price'})
fig.update_layout(showlegend=False)
c2.plotly_chart(fig, use_container_width=True)
#######
#H03
#######
c1, c2 = st.beta_columns(2)
    c1.subheader('H03: Properties without basement have a sqft_lot 50% bigger than the ones with basement.')
basement_lot = df[['sqft_lot', 'basement']].groupby('basement').sum().reset_index()
fig = px.bar(basement_lot, x="basement", y="sqft_lot", color='basement', template = 'seaborn',
labels={'basement':'Basement', 'sqft_lot': 'Property Lot Size'})
fig.update_layout(showlegend=False)
c1.plotly_chart(fig, use_container_width=True)
#######
#H04
#######
c2.subheader('H04: Properties with higher number of bedrooms are, in average, 10% more expensive.')
bedrooms_price = df[['price', 'bedrooms']].groupby('bedrooms').mean().reset_index()
bedrooms_price['bedrooms']= bedrooms_price['bedrooms'].astype(str)
fig = px.bar(bedrooms_price, x="bedrooms", y="price", color='bedrooms', template='seaborn',
labels={'bedrooms':'Number of Bedrooms', 'price': 'Property Price'})
fig.update_layout(showlegend=False)
c2.plotly_chart(fig, use_container_width=True)
#######
#H05
#######
c1, c2 = st.beta_columns(2)
c1.subheader('H05: Properties that were never renovated are, in average, 20% less expensive.')
renovation_price = df[['price', 'renovation']].groupby('renovation').mean().reset_index()
fig = px.bar(renovation_price, x="renovation", y="price", color='renovation', template='seaborn',
labels={'renovation':'Renovation', 'price': 'Property Price'})
fig.update_layout(showlegend=False)
c1.plotly_chart(fig, use_container_width=True)
#######
#H06
#######
c2.subheader('H06: Older properties that were never renovated are 40% less expensive.')
data = df[df['age_build'] == 'build <1955']
renovation_price = data[['price', 'renovation']].groupby('renovation').mean().reset_index()
fig = px.bar(renovation_price, x="renovation", y="price", color='renovation', template='seaborn',
labels={'renovation':'Renovation', 'price': 'Property Price'})
fig.update_layout(showlegend=False)
c2.plotly_chart(fig, use_container_width=True)
#######
#H07
#######
c1, c2 = st.beta_columns(2)
c1.subheader('H07: Properties that were renovated recently are, in average, 10% more expensive.')
data = df[df['renovation'] == 'renovation']
data = data.copy()
data['age_renovation'] = data['yr_renovated'].apply(lambda x: 'new_renovation' if x >= 2000 else 'old_renovation')
new_renovation_price = data[['price', 'age_renovation']].groupby('age_renovation').mean().reset_index()
fig = px.bar(new_renovation_price, x="age_renovation", y="price", color='age_renovation',
template = 'seaborn', labels={'age_renovation':'Age of Renovation', 'price': 'Property Price'})
fig.update_layout(showlegend=False)
c1.plotly_chart(fig, use_container_width=True)
#######
#H08
#######
    c2.subheader('H08: Properties in bad condition but with a waterfront view are, in average, more expensive.')
data = df[df['condition_type'] == 'bad']
bad_water_price = data[['price', 'waterfront']].groupby('waterfront').mean().reset_index()
fig= px.bar(bad_water_price, x="waterfront", y="price", color='waterfront', template='seaborn',
labels={'waterfront':'Waterfront', 'price': 'Property Price'})
fig.update_layout(showlegend=False)
c2.plotly_chart(fig, use_container_width=True)
#######
#H09
#######
c1, c2 = st.beta_columns(2)
    c1.subheader('H09: The YoY (Year over Year) growth of the price of the properties is 10%.')
YoY = df[['year', 'price']].groupby('year').median().reset_index()
YoY['year']= YoY['year'].astype(str)
fig = px.bar(YoY, x="year", y="price", color='year', template='seaborn', labels={'year':'Year', 'price': 'Property Price'})
fig.update_layout(showlegend=False)
c1.plotly_chart(fig, use_container_width=True)
#######
#H10
#######
c2.subheader('H10: The MoM (Month over Month) growth of the price of the properties is 15%.')
MoM = df[['price', 'year', 'month']].groupby('month').mean().reset_index()
MoM['month']=MoM['month'].apply(lambda x: 'Jan' if x == 1 else
'Feb' if x == 2 else
'Mar' if x == 3 else
'Apr' if x == 4 else
'May' if x == 5 else
'Jun' if x == 6 else
'Jul' if x == 7 else
'Aug' if x == 8 else
'Sep' if x == 9 else
'Oct' if x == 10 else
'Nov' if x == 11 else 'Dec')
fig= px.line(MoM, x='month', y='price', template='seaborn', labels={'month':'Month', 'price': 'Property Price'})
fig.update_traces(mode='markers+lines')
c2.plotly_chart(fig, use_container_width=True)
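# Each hypothesis above repeats the same group-by-mean + bar-chart pattern; a small helper
# like this hypothetical one would remove most of that duplication (sketch, not used above).
def mean_bar_chart(container, df, group_col, value_col='price', labels=None):
    grouped = df[[value_col, group_col]].groupby(group_col).mean().reset_index()
    fig = px.bar(grouped, x=group_col, y=value_col, color=group_col,
                 template='seaborn', labels=labels or {})
    fig.update_layout(showlegend=False)
    container.plotly_chart(fig, use_container_width=True)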
def business_challenges(df):
    st.header('Business Challenges')
###############
#Q.01
###############
st.subheader('1. Which properties should House Rocket buy, and at what price?')
data = df[['id', 'zipcode', 'price', 'condition', 'lat', 'long', 'season']]
data = data.copy()
# Determine median price by zipcode
zipcode_price = data[['zipcode', 'price']].groupby('zipcode').median().reset_index()
# Defining buying strategy
buy_strat = pd.merge(zipcode_price, data, on='zipcode', how='inner') # Join columns
buy_strat = buy_strat.rename(columns={'price_x': 'zipcode_median', 'price_y': 'property_price'}) # Rename columns
buy_strat = buy_strat.reindex(columns=['id', 'zipcode', 'lat', 'long', 'season',
'condition', 'zipcode_median', 'property_price']) # Order columns
buy_strat['buy'] = 'NA' # Create empty column
# Populate column buy
    # Buy only properties in condition > 3 (regular/good) priced at or below the zipcode median price
for i in range (len(buy_strat)):
if (buy_strat.loc[i, 'zipcode_median'] >= buy_strat.loc[i, 'property_price']) & (buy_strat.loc[i, 'condition'] > 3):
buy_strat.loc[i, 'buy'] = 'yes'
else:
buy_strat.loc[i, 'buy'] = 'no'
# Map
data_map = buy_strat[['id', 'lat', 'long', 'property_price', 'condition', 'buy', 'zipcode_median']]
buy_map = folium.Map(location=[df['lat'].mean(), df['long'].mean()],
width= 600,
height=300,
default_zoom_start=30)
for i, row in data_map.iterrows():
if row['buy'] == 'yes':
folium.CircleMarker([row['lat'], row['long']], color="#2e8540", fill=True, fill_color="#3186cc", radius= 5,
popup=('''Price ${0}.
Condition: {1},
Zipcode median ${2}
Buy: {3}''').format(row['property_price'],
row['condition'],
row['zipcode_median'],
row['buy']),).add_to(buy_map)
else:
folium.CircleMarker([row['lat'], row['long']], color="#cd2026", fill=True, fill_color="#3186cc", radius= 5,
popup=('''Price ${0}.
Condition: {1},
Zipcode median ${2}
Buy: {3}''').format(row['property_price'],
row['condition'],
row['zipcode_median'],
row['buy']),).add_to(buy_map)
folium_static(buy_map)
###############
#Q.02
###############
    st.subheader('2. Once the property is acquired, what is the best moment to sell, and at what price?')
data = buy_strat[buy_strat['buy'] == 'yes']
# Determine median price by zipcode, by season
season_zip_price = data[['season', 'zipcode', 'property_price']].groupby(['zipcode', 'season']).median().reset_index()
season_zip_price = season_zip_price.rename(columns={'season': 'season_median', 'property_price': 'season_prop_price'})
# Defining selling strategy
    sell_strat = pd.merge(season_zip_price, data, on='zipcode', how='inner')
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..kabam_exe import Kabam
test = {}
class TestKabam(unittest.TestCase):
"""
Unit tests for Kabam model.
: unittest will
: 1) call the setup method,
: 2) then call every method starting with "test",
: 3) then the teardown method
"""
print("kabam unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for Kabam unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open Kabam qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for Kabam unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_kabam_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty kabam object
kabam_empty = Kabam(df_empty, df_empty)
return kabam_empty
def test_ventilation_rate(self):
"""
:description Ventilation rate of aquatic animal
:unit L/d
:expression Kabam Eq. A5.2b (Gv)
:param zoo_wb: wet weight of animal (kg)
:param conc_do: concentration of dissolved oxygen (mg O2/L)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series(['nan', 0.00394574, 0.468885], dtype = 'float')
try:
#use the zooplankton variables/values for the test
kabam_empty.zoo_wb = pd.Series(['nan', 1.e-07, 1.e-4], dtype = 'float')
kabam_empty.conc_do = pd.Series([5.0, 10.0, 7.5], dtype='float')
result = kabam_empty.ventilation_rate(kabam_empty.zoo_wb)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
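    # Worked check for the test above (assuming Kabam Eq. A5.2b takes the usual form
    # Gv = 1400 * Wb**0.65 / conc_do): Wb = 1e-4 kg with conc_do = 7.5 mg O2/L gives
    # 1400 * (1e-4)**0.65 / 7.5 ~ 0.468885 L/d, matching the expected value.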
def test_pest_uptake_eff_gills(self):
"""
:description Pesticide uptake efficiency by gills
:unit fraction
"expresssion Kabam Eq. A5.2a (Ew)
:param log kow: octanol-water partition coefficient ()
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series(['nan', 0.540088, 0.540495], dtype = 'float')
try:
kabam_empty.log_kow = pd.Series(['nan', 5., 6.], dtype = 'float')
kabam_empty.kow = 10.**(kabam_empty.log_kow)
result = kabam_empty.pest_uptake_eff_bygills()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
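    # Worked check for the test above (assuming Eq. A5.2a takes the form Ew = 1 / (1.85 + 155 / kow)):
    # kow = 1e5 gives 1 / (1.85 + 0.00155) ~ 0.540088, matching the expected value.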
def test_phytoplankton_k1_calc(self):
"""
:description Uptake rate constant through respiratory area for phytoplankton
:unit: L/kg*d
:expression Kabam Eq. A5.1 (K1:unique to phytoplankton)
:param log kow: octanol-water partition coefficient ()
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([1639.34426, 8695.6521, 15267.1755], dtype = 'float')
try:
kabam_empty.log_kow = pd.Series([4., 5., 6.], dtype = 'float')
kabam_empty.kow = 10.**(kabam_empty.log_kow)
result = kabam_empty.phytoplankton_k1_calc(kabam_empty.kow)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
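    # Worked check for the test above (assuming Eq. A5.1 takes the form k1 = 1 / (6.0e-5 + 5.5 / kow)):
    # kow = 1e4 gives 1 / (5.5e-4 + 6.0e-5) ~ 1639.34 L/kg-d, matching the first expected value.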
def test_aq_animal_k1_calc(self):
"""
        :description Uptake rate constant through respiratory area for aquatic animals
:unit: L/kg*d
:expression Kabam Eq. A5.2 (K1)
:param pest_uptake_eff_bygills: Pesticide uptake efficiency by gills of aquatic animals (fraction)
:param vent_rate: Ventilation rate of aquatic animal (L/d)
:param wet_wgt: wet weight of animal (kg)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series(['nan', 1201.13849, 169.37439], dtype = 'float')
try:
pest_uptake_eff_bygills = pd.Series(['nan', 0.0304414, 0.0361228], dtype = 'float')
vent_rate = pd.Series(['nan', 0.00394574, 0.468885], dtype = 'float')
wet_wgt = pd.Series(['nan', 1.e-07, 1.e-4], dtype = 'float')
result = kabam_empty.aq_animal_k1_calc(pest_uptake_eff_bygills, vent_rate, wet_wgt)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
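    # Worked check for the test above (assuming Eq. A5.2 takes the form k1 = Ew * Gv / Wb):
    # 0.0304414 * 0.00394574 / 1e-7 ~ 1201.14, in line with the expected 1201.13849.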
def test_animal_water_part_coef(self):
"""
:description Organism-Water partition coefficient (based on organism wet weight)
:unit ()
:expression Kabam Eq. A6a (Kbw)
:param zoo_lipid: lipid fraction of organism (kg lipid/kg organism wet weight)
:param zoo_nlom: non-lipid organic matter (NLOM) fraction of organism (kg NLOM/kg organism wet weight)
:param zoo_water: water content of organism (kg water/kg organism wet weight)
:param kow: octanol-water partition coefficient ()
:param beta: proportionality constant expressing the sorption capacity of NLOM or NLOC to
that of octanol
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([650.87, 11000.76, 165000.64], dtype = 'float')
try:
#For test purpose we'll use the zooplankton variable names
kabam_empty.zoo_lipid_frac = pd.Series([0.03, 0.04, 0.06], dtype = 'float')
kabam_empty.zoo_nlom_frac = pd.Series([0.10, 0.20, 0.30,], dtype = 'float')
kabam_empty.zoo_water_frac = pd.Series([0.87, 0.76, 0.64], dtype = 'float')
kabam_empty.kow = pd.Series([1.e4, 1.e5, 1.e6], dtype = 'float')
beta = 0.35
result = kabam_empty.animal_water_part_coef(kabam_empty.zoo_lipid_frac,
kabam_empty.zoo_nlom_frac,
kabam_empty.zoo_water_frac, beta)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
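    # Worked check for the test above (assuming Kbw = lipid_frac * kow + nlom_frac * beta * kow + water_frac):
    # 0.03 * 1e4 + 0.10 * 0.35 * 1e4 + 0.87 = 650.87, matching the first expected value.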
def test_aq_animal_k2_calc(self):
"""
:description Elimination rate constant through the respiratory area
:unit (per day)
:expression Kabam Eq. A6 (K2)
:param zoo_k1: Uptake rate constant through respiratory area for aquatic animals
        :param k_bw_zoo (Kbw): Organism-Water partition coefficient (based on organism wet weight) ()
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([2.5186969, 0.79045921, 0.09252798], dtype = 'float')
try:
#For test purpose we'll use the zooplankton variable names
kabam_empty.zoo_k1 = pd.Series([1639.34426, 8695.6521, 15267.1755], dtype = 'float')
kabam_empty.k_bw_zoo = pd.Series([650.87, 11000.76, 165000.64], dtype = 'float')
result = kabam_empty.aq_animal_k2_calc(kabam_empty.zoo_k1, kabam_empty.k_bw_zoo)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
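    # Worked check for the test above (assuming Eq. A6 is k2 = k1 / Kbw):
    # 1639.34426 / 650.87 ~ 2.5187 per day, matching the first expected value.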
def test_animal_grow_rate_const(self):
"""
:description Aquatic animal/organism growth rate constant
:unit (per day)
:expression Kabam Eq. A7.1 & A7.2
:param zoo_wb: wet weight of animal/organism (kg)
:param water_temp: water temperature (degrees C)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.01255943, 0.00125594, 0.00251], dtype = 'float')
try:
#For test purpose we'll use the zooplankton variable names
kabam_empty.zoo_wb = pd.Series([1.e-7, 1.e-2, 1.0], dtype = 'float')
kabam_empty.water_temp = pd.Series([10., 15., 20.], dtype = 'float')
result = kabam_empty.animal_grow_rate_const(kabam_empty.zoo_wb)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
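    # Worked check for the test above (assuming Eq. A7.1/A7.2 take the form kG = c * Wb**-0.2, where
    # c = 0.0005 is used for the cooler cases and c = 0.00251 for the 20 C case):
    # 0.0005 * (1e-7)**-0.2 ~ 0.012559 per day, matching the first expected value.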
def test_dietary_trans_eff(self):
"""
        :description Aquatic animal/organism dietary pesticide transfer efficiency
:unit fraction
:expression Kabam Eq. A8a (Ed)
:param kow: octanol-water partition coefficient ()
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.499251, 0.492611, 0.434783], dtype = 'float')
try:
kabam_empty.kow = pd.Series([1.e4, 1.e5, 1.e6], dtype = 'float')
result = kabam_empty.dietary_trans_eff()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
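    # Worked check for the test above (assuming Eq. A8a is Ed = 1 / (3.0e-7 * kow + 2)):
    # kow = 1e4 gives 1 / 2.003 ~ 0.499251, matching the first expected value.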
def test_aq_animal_feeding_rate(self):
"""
:description Aquatic animal feeding rate (except filterfeeders)
:unit kg/d
:expression Kabam Eq. A8b1 (Gd)
:param wet_wgt: wet weight of animal/organism (kg)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([4.497792e-08, 1.0796617e-3, 0.073042572], dtype = 'float')
try:
#For test purpose we'll use the zooplankton variable names
kabam_empty.zoo_wb = pd.Series([1.e-7, 1.e-2, 1.], dtype = 'float')
kabam_empty.water_temp = pd.Series([10., 15., 20.])
result = kabam_empty.aq_animal_feeding_rate(kabam_empty.zoo_wb)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
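    # Worked check for the test above (assuming Eq. A8b1 is Gd = 0.022 * Wb**0.85 * exp(0.06 * T)):
    # Wb = 1 kg at 20 C gives 0.022 * exp(1.2) ~ 0.073043 kg/d, matching the last expected value.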
def test_filterfeeder_feeding_rate(self):
"""
:description Filter feeder feeding rate
:unit kg/d
:expression Kabam Eq. A8b2 (Gd)
:param self.gv_filterfeeders: filterfeeder ventilation rate (L/d)
:param self.conc_ss: Concentration of Suspended Solids (Css - kg/L)
:param particle_scav_eff: efficiency of scavenging of particles absorbed from water (fraction)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series(['nan', 1.97287e-7, 0.03282195], dtype = 'float')
try:
kabam_empty.gv_filterfeeders = pd.Series(['nan', 0.00394574, 0.468885], dtype = 'float')
kabam_empty.conc_ss = pd.Series([0.00005, 0.00005, 0.07], dtype = 'float')
kabam_empty.particle_scav_eff = 1.0
result = kabam_empty.filterfeeders_feeding_rate()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_diet_uptake_rate_const(self):
"""
        :description pesticide uptake rate constant for uptake through ingestion of food
:unit kg food/kg organism - day
:expression Kabam Eq. A8 (kD)
:param dietary_trans_eff: dietary pesticide transfer efficiency (fraction)
:param feeding rate: animal/organism feeding rate (kg/d)
:param wet weight of aquatic animal/organism (kg)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.22455272, 0.05318532, 0.031755767 ], dtype = 'float')
try:
#For test purpose we'll use the zooplankton variable names
kabam_empty.ed_zoo = pd.Series([0.499251, 0.492611, 0.434783], dtype = 'float')
kabam_empty.gd_zoo = pd.Series([4.497792e-08, 1.0796617e-3, 0.073042572], dtype = 'float')
kabam_empty.zoo_wb = pd.Series([1.e-7, 1.e-2, 1.0])
result = kabam_empty.diet_uptake_rate_const(kabam_empty.ed_zoo, \
kabam_empty.gd_zoo, kabam_empty.zoo_wb)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
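    # Worked check for the test above (assuming Eq. A8 is kD = Ed * Gd / Wb):
    # 0.499251 * 4.497792e-8 / 1e-7 ~ 0.224553, matching the first expected value.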
def test_overall_diet_content(self):
"""
:description Overall fraction of aquatic animal/organism diet attributed to diet food component
(i.e., lipids or NLOM or water)
:unit kg/kg
:expression not shown in Kabam documentation: it is associated with Kabam Eq. A9
overall_diet_content is equal to the sum over dietary elements
        : of (fraction of diet) * (content in diet element); for example zooplankton ingest sediment and
: phytoplankton, thus the overall lipid content of the zooplankton diet equals
: (fraction of sediment in zooplankton diet) * (fraction of lipids in sediment) +
: (fraction of phytoplankton in zooplankton diet) * (fraction of lipids in phytoplankton)
        :param diet_fraction: list of values representing fractions of aquatic animal/organism diet attributed
to each element of diet
:param content_fraction: list of values representing fraction of diet element attributed to a specific
component of that diet element (e.g., lipid, NLOM, or water)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.025, 0.03355, 0.0465], dtype = 'float')
try:
#For test purposes we'll use the small fish diet variables/values
kabam_empty.sfish_diet_sediment = pd.Series([0.0, 0.01, 0.05], dtype = 'float')
kabam_empty.sfish_diet_phytoplankton = pd.Series([0.0, 0.01, 0.05], dtype = 'float')
kabam_empty.sfish_diet_zooplankton = pd.Series([0.5, 0.4, 0.5], dtype = 'float')
kabam_empty.sfish_diet_benthic_invertebrates = pd.Series([0.5, 0.57, 0.35], dtype = 'float')
kabam_empty.sfish_diet_filterfeeders = pd.Series([0.0, 0.01, 0.05], dtype = 'float')
kabam_empty.sediment_lipid = pd.Series([0.0, 0.01, 0.0], dtype = 'float')
kabam_empty.phytoplankton_lipid = pd.Series([0.02, 0.015, 0.03], dtype = 'float')
kabam_empty.zoo_lipid = pd.Series([0.03, 0.04, 0.05], dtype = 'float')
kabam_empty.beninv_lipid = pd.Series([0.02, 0.03, 0.05], dtype = 'float')
kabam_empty.filterfeeders_lipid = pd.Series([0.01, 0.02, 0.05], dtype = 'float')
diet_elements = pd.Series([], dtype = 'float')
content_fracs = pd.Series([], dtype = 'float')
for i in range(len(kabam_empty.sfish_diet_sediment)):
diet_elements = [kabam_empty.sfish_diet_sediment[i],
kabam_empty.sfish_diet_phytoplankton[i],
kabam_empty.sfish_diet_zooplankton[i],
kabam_empty.sfish_diet_benthic_invertebrates[i],
kabam_empty.sfish_diet_filterfeeders[i]]
content_fracs = [kabam_empty.sediment_lipid[i],
kabam_empty.phytoplankton_lipid[i],
kabam_empty.zoo_lipid[i],
kabam_empty.beninv_lipid[i],
kabam_empty.filterfeeders_lipid[i]]
result[i] = kabam_empty.overall_diet_content(diet_elements, content_fracs)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fecal_egestion_rate_factor(self):
"""
Aquatic animal/organism egestion rate of fecal matter factor (to be multiplied by the
feeding rate to calculate egestion rate of fecal matter)
:unit (kg feces)/[(kg organism) - day]
:expression Kabam Eq. A9 (GF)
:param epsilonL: dietary assimilation rate of lipids (fraction)
:param epsilonN: dietary assimilation rate of NLOM (fraction)
:param epsilonW: dietary assimilation rate of water (fraction)
:param diet_lipid; lipid content of aquatic animal/organism diet (fraction)
:param diet_nlom NLOM content of aquatic animal/organism diet (fraction)
:param diet_water water content of aquatic animal/organism diet (fraction)
:param feeding_rate: aquatic animal/organism feeding rate (kg/d)
:return:
"""
#this test includes two results; 'result1' represents the overall assimilation rate of the
#aquatic animal/organism diet; and 'result' represents the product of this assimilation rate
#and the feeding rate (this multiplication will be done in the main model routine
#as opposed to within a method -- the method here is limited to the assimilation factor
        #because this factor is used elsewhere as well)
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
result1 = pd.Series([], dtype='float')
expected_results = pd.Series([1.43e-9, 5.005e-5, 4.82625e-3], dtype = 'float')
try:
#For test purposes we'll use the zooplankton variable names and relevant constant values
kabam_empty.epsilon_lipid_zoo = 0.72
kabam_empty.epsilon_nlom_zoo = 0.60
kabam_empty.epsilon_water = 0.25
kabam_empty.v_ld_zoo = pd.Series([0.025, 0.035, 0.045], dtype = 'float')
kabam_empty.v_nd_zoo = pd.Series([0.025, 0.035, 0.045], dtype = 'float')
kabam_empty.v_wd_zoo = pd.Series([0.025, 0.035, 0.045], dtype = 'float')
kabam_empty.gd_zoo = pd.Series([4.e-08, 1.e-3, 0.075], dtype = 'float')
result1 = kabam_empty.fecal_egestion_rate_factor(kabam_empty.epsilon_lipid_zoo,
kabam_empty.epsilon_nlom_zoo,
kabam_empty.epsilon_water,
kabam_empty.v_ld_zoo,
kabam_empty.v_nd_zoo,
kabam_empty.v_wd_zoo)
result = result1 * kabam_empty.gd_zoo
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_diet_elements_gut(self):
"""
Fraction of diet elements (i.e., lipid, NLOM, water) in the gut
:unit (kg lipid) / (kg digested wet weight)
:expression Kabam Eq. A9 (VLG, VNG, VWG)
        :param (epsilon_lipid_*) relevant dietary assimilation rate (fraction)
:param (v_ld_*) relevant overall diet content of diet element (kg/kg)
:param (diet_assim_factor_*) relevant: Aquatic animal/organism egestion rate of fecal matter factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.2, 0.196, 0.1575], dtype = 'float')
try:
#for this test we'll use the lipid content for zooplankton
kabam_empty.epsilon_lipid_zoo = 0.72
kabam_empty.v_ld_zoo = pd.Series([0.025, 0.035, 0.045], dtype = 'float')
kabam_empty.diet_assim_factor_zoo = pd.Series([0.035, 0.05, 0.08], dtype = 'float')
result = kabam_empty.diet_elements_gut(kabam_empty.epsilon_lipid_zoo,
kabam_empty.v_ld_zoo, kabam_empty.diet_assim_factor_zoo)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_gut_organism_partition_coef(self):
"""
        Partition coefficient of the pesticide between the gastrointestinal tract and the organism
:unit none
:expression Kabam Eq. A9 (KGB)
:param vlg_zoo: lipid content in the gut
:param vng_zoo: nlom content in the gut
:param vwg_zoo: water content in the gut
:param kow: pesticide Kow
:param beta_aq_animals: proportionality constant expressing the sorption capacity of NLOM to that of octanol
:param zoo_lipid_frac: lipid content in the whole organism
:param zoo_nlom_frac: nlom content in the whole organism
:param zoo_water_frac: water content in the whole organism
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.991233, 1.662808, 1.560184], dtype = 'float')
try:
            #for this test we'll use the zooplankton variables
kabam_empty.beta_aq_animals = 0.035
kabam_empty.kow = pd.Series([1.e4, 1.e5, 1.e6], dtype = 'float')
kabam_empty.vlg_zoo = pd.Series([0.2, 0.25, 0.15], dtype = 'float')
kabam_empty.vng_zoo = pd.Series([0.1, 0.15, 0.25], dtype = 'float')
kabam_empty.vwg_zoo = pd.Series([0.15, 0.35, 0.05], dtype = 'float')
kabam_empty.zoo_lipid_frac = pd.Series([0.20, 0.15, 0.10], dtype = 'float')
kabam_empty.zoo_nlom_frac = pd.Series([0.15, 0.10, 0.05], dtype = 'float')
kabam_empty.zoo_water_frac = pd.Series([0.65, 0.75, 0.85], dtype = 'float')
result = kabam_empty.gut_organism_partition_coef(kabam_empty.vlg_zoo, kabam_empty.vng_zoo,
kabam_empty.vwg_zoo, kabam_empty.kow, kabam_empty.beta_aq_animals,
kabam_empty.zoo_lipid_frac, kabam_empty.zoo_nlom_frac,
kabam_empty.zoo_water_frac)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fecal_elim_rate_const(self):
"""
rate constant for elimination of the pesticide through excretion of contaminated feces
:unit per day
:param gf_zoo: egestion rate of fecal matter (kg feces)/(kg organism-day)
:param ed_zoo: dietary pesticide transfer efficiency (fraction)
:param kgb_zoo: gut - partition coefficient of the pesticide between the gastrointestinal tract
and the organism (-)
:param zoo_wb: wet weight of organism (kg)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([7.5e-4, 0.0525, 5.625e-4], dtype = 'float')
try:
#for this test we'll use the zooplankton variables
kabam_empty.gf_zoo = pd.Series([1.5e-9, 5.0e-5, 4.5e-3], dtype = 'float')
kabam_empty.ed_zoo = pd.Series([0.5, 0.7, 0.25], dtype = 'float')
kabam_empty.kgb_zoo = pd.Series([1.0, 1.5, 0.5], dtype = 'float')
kabam_empty.zoo_wb = pd.Series([1.e-6, 1.e-3, 1.0], dtype = 'float')
result = kabam_empty.fecal_elim_rate_const(kabam_empty.gf_zoo, kabam_empty.ed_zoo,
kabam_empty.kgb_zoo, kabam_empty.zoo_wb)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_frac_pest_freely_diss(self):
"""
Calculate Fraction of pesticide freely dissolved in water column (that can be
absorbed via membrane diffusion)
:unit fraction
:expression Kabam Eq. A2
:param conc_poc: Concentration of Particulate Organic Carbon in water column (kg OC/L)
        :param kow: octanol-water partition coefficient (-)
:param conc_doc: Concentration of Dissolved Organic Carbon in water column (kg OC/L)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.13422819, 0.00462963, 0.00514139], dtype = 'float')
try:
#for this test we'll use the zooplankton variables
kabam_empty.conc_poc = pd.Series([1.5e-3, 5.0e-3, 4.5e-4], dtype = 'float')
kabam_empty.alpha_poc = 0.35
kabam_empty.kow = pd.Series([1.e4, 1.e5, 1.e6], dtype = 'float')
kabam_empty.conc_doc = pd.Series([1.5e-3, 5.0e-3, 4.5e-4], dtype = 'float')
kabam_empty.alpha_doc = 0.08
result = kabam_empty.frac_pest_freely_diss()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
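    # Worked check for the test above (assuming Eq. A2 is
    # phi = 1 / (1 + conc_poc * alpha_poc * kow + conc_doc * alpha_doc * kow)):
    # 1 / (1 + 1.5e-3 * 0.35 * 1e4 + 1.5e-3 * 0.08 * 1e4) = 1 / 7.45 ~ 0.134228,
    # matching the first expected value.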
def test_conc_freely_diss_watercol(self):
"""
concentration of freely dissolved pesticide in overlying water column
:unit g/L
:param phi: Fraction of pesticide freely dissolved in water column (that can be
absorbed via membrane diffusion) (fraction)
:param water_column_eec: Water Column 1-in-10 year EECs (ug/L)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([1.e-1, 2.4e-2, 1.], dtype = 'float')
try:
#for this test we'll use the zooplankton variables
kabam_empty.phi = pd.Series([0.1, 0.004, 0.05], dtype = 'float')
kabam_empty.water_column_eec = pd.Series([1., 6., 20.], dtype = 'float')
result = kabam_empty.conc_freely_diss_watercol()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_sed_norm_4oc(self):
"""
pesticide concentration in sediment normalized for organic carbon
:unit g/(kg OC)
:expression Kabam Eq. A4a
:param pore_water_eec: freely dissolved pesticide concentration in sediment pore water
:param k_oc: organic carbon partition coefficient (L/kg OC)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([2.5e4, 6.e4, 2.e6], dtype = 'float')
try:
#for this test we'll use the zooplankton variables
kabam_empty.k_oc = pd.Series([2.5e4, 1.e4, 1.e5], dtype = 'float')
kabam_empty.pore_water_eec = pd.Series([1., 6., 20.], dtype = 'float')
result = kabam_empty.conc_sed_norm_4oc()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_sed_dry_wgt(self):
"""
Calculate concentration of chemical in solid portion of sediment
:unit g/(kg dry)
:expression Kabam Eq. A4
:param c_soc: pesticide concentration in sediment normalized for organic carbon g/(kg OC)
:param sediment_oc: fraction organic carbon in sediment (fraction)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.001, 0.0036, 0.4], dtype = 'float')
try:
#for this test we'll use the zooplankton variables
kabam_empty.c_soc = pd.Series([0.025, 0.06, 2.00], dtype = 'float')
kabam_empty.sediment_oc = pd.Series([4., 6., 20.], dtype = 'float')
kabam_empty.sediment_oc_frac = kabam_empty.percent_to_frac(kabam_empty.sediment_oc)
result = kabam_empty.conc_sed_dry_wgt()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_diet_pest_conc(self):
"""
overall concentration of pesticide in aquatic animal/organism diet
:unit g/(kg wet weight)
:expression Kabam Eq. A1 (SUM(Pi * CDi);
        :param diet_frac_lfish: fraction of large fish diet containing prey i (Pi in Eq. A1)
        :param diet_conc_lfish: concentration of pesticide in prey i (CDi in Eq. A1)
:param lipid_content_lfish: fraction of prey i that is lipid
:notes for this test we populate all prey items for large fish even though large fish
typically only consume medium fish
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
result1 = pd.Series([], dtype='float')
result2 = pd.Series([], dtype='float')
expected_results1 = pd.Series([0.2025, 0.2025, 0.205], dtype = 'float')
expected_results2 = pd.Series([5.316667, 4.819048, 4.3], dtype = 'float')
try:
#for this test we'll use the large fish variables (there are 7 prey items listed
#for large fish (sediment, phytoplankton, zooplankton, benthic invertebrates,
# filterfeeders, small fish, and medium fish --- this is the order related
#to the values in the two series below)
kabam_empty.diet_frac_lfish = pd.Series([[0.02, 0.03, 0.10, 0.05, 0.10, 0.7],
[0.0, 0.05, 0.05, 0.05, 0.10, 0.75],
[0.01, 0.02, 0.03, 0.04, 0.10, 0.8]], dtype = 'float')
kabam_empty.diet_conc_lfish = pd.Series([[0.10, 0.10, 0.20, 0.15, 0.30, 0.20],
[0.10, 0.10, 0.20, 0.15, 0.30, 0.20],
[0.10, 0.10, 0.20, 0.15, 0.30, 0.20]], dtype = 'float')
kabam_empty.diet_lipid_content_lfish = pd.Series([[0.0, 0.02, 0.03, 0.03, 0.04, 0.04],
[0.01, 0.025, 0.035, 0.03, 0.04, 0.045],
[0.0, 0.02, 0.03, 0.03, 0.05, 0.05]], dtype = 'float')
result1,result2 = kabam_empty.diet_pest_conc(kabam_empty.diet_frac_lfish,
kabam_empty.diet_conc_lfish,
kabam_empty.diet_lipid_content_lfish)
npt.assert_allclose(result1, expected_results1, rtol=1e-4, atol=0, err_msg='', verbose=True)
npt.assert_allclose(result2, expected_results2, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result1, expected_results1, result2, expected_results2]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_pest_conc_organism(self):
"""
concentration of pesticide in aquatic animal/organism
:unit g/(kg wet weight)
:expression Kabam Eq. A1 (CB)
:param lfish_k1: pesticide uptake rate constant through respiratory area (gills, skin) (L/kg-d)
        :param lfish_k2: rate constant for elimination of the pesticide through the respiratory area (gills, skin) (/d)
:param lfish_kd: pesticide uptake rate constant for uptake through ingestion of food (kg food/(kg organism - day)
:param lfish_ke: rate constant for elimination of the pesticide through excretion of feces (/d)
:param lfish_kg: animal/organism growth rate constant (/d)
:param lfish_km: rate constant for pesticide metabolic transformation (/d)
        :param lfish_mp: fraction of respiratory ventilation that involves pore-water of sediment (fraction)
:param lfish_mo: fraction of respiratory ventilation that involves overlying water; 1-mP (fraction)
:param phi: fraction of the overlying water pesticide concentration that is freely dissolved and can be absorbed
via membrane diffusion (fraction)
        :param cwto: total pesticide concentration in water column above sediment (g/L)
        :param pore_water_eec: freely dissolved pesticide concentration in pore-water of sediment (g/L)
:param total_diet_conc_lfish: concentration of pesticide in overall diet of aquatic animal/organism (g/kg wet weight)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([1.97044e-3, 1.85185e-3, 3.97389e-3], dtype = 'float')
try:
kabam_empty.phi = pd.Series([1.0, 1.0, 1.0], dtype = 'float')
kabam_empty.water_column_eec = pd.Series([1.e-3, 1.e-4, 2.e-3], dtype = 'float')
kabam_empty.pore_water_eec = pd.Series([1.e-4, 1.e-5, 2.e-3], dtype = 'float')
            #for this test we'll use the large fish variables (and values that may not specifically apply to large fish)
kabam_empty.lfish_k1 = pd.Series([10., 5., 2.], dtype = 'float')
kabam_empty.lfish_k2 = pd.Series( [10., 5., 3.], dtype = 'float')
kabam_empty.lfish_kd = pd.Series([0.05, 0.03, 0.02], dtype = 'float')
kabam_empty.lfish_ke = pd.Series([0.05, 0.02, 0.02], dtype = 'float')
kabam_empty.lfish_kg = pd.Series([0.1, 0.01, 0.003], dtype = 'float')
kabam_empty.lfish_km = pd.Series([0.0, 0.1, 0.5], dtype = 'float')
kabam_empty.lfish_mp = pd.Series([0.0, 0.0, 0.05], dtype = 'float')
kabam_empty.lfish_mo = pd.Series([1.0, 1.0, 0.95], dtype = 'float')
kabam_empty.total_diet_conc_lfish = pd.Series( [.20, .30, .50], dtype = 'float')
result = kabam_empty.pest_conc_organism(kabam_empty.lfish_k1, kabam_empty.lfish_k2,
kabam_empty.lfish_kd, kabam_empty.lfish_ke,
kabam_empty.lfish_kg, kabam_empty.lfish_km,
kabam_empty.lfish_mp, kabam_empty.lfish_mo,
kabam_empty.total_diet_conc_lfish)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_lipid_norm_residue_conc(self):
"""
Lipid normalized pesticide residue in aquatic animal/organism
:unit ug/kg-lipid
        :expression represents a factor (CB/VLB) used in Kabam Eqs. F4, F5, & F6
:param cb_lfish: total pesticide concentration in animal/organism (g/kg-ww)
:param lfish_lipid_frac: fraction of animal/organism that is lipid (fraction)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.025, 0.00833333, 0.0005], dtype = 'float')
try:
#for this test we'll use the large fish variables
kabam_empty.out_cb_lfish = pd.Series([1.e-3, 5.e-4, 1.e-5], dtype = 'float')
kabam_empty.lfish_lipid_frac = pd.Series([0.04, 0.06, 0.02], dtype = 'float')
kabam_empty.gms_to_microgms = 1.e6
result = kabam_empty.lipid_norm_residue_conc(kabam_empty.out_cb_lfish,
kabam_empty.lfish_lipid_frac)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_pest_conc_diet_uptake(self):
"""
:description Pesticide concentration in animal/organism originating from uptake through diet
:unit g/kg ww
:expression Kabam A1 (with k1 = 0)
        :param lfish_kD: pesticide uptake rate constant for uptake through ingestion of food (kg food/kg organism - day)
:param total_diet_conc: overall concentration of pesticide in diet of animal/organism (g/kg-ww)
        :param lfish_k2: rate constant for elimination of the pesticide through the respiratory area (gills, skin) (/d)
:param lfish_kE: rate constant for elimination of the pesticide through excretion of feces (/d)
:param lfish_kG: animal/organism growth rate constant (/d)
:param lfish_kM: rate constant for pesticide metabolic transformation (/d)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([9.8522e-4, 1.75439e-3, 2.83849e-3], dtype = 'float')
try:
            #for this test we'll use the large fish variables (and values that may not specifically apply to large fish)
kabam_empty.lfish_k2 = pd.Series( [10., 5., 3.], dtype = 'float')
kabam_empty.lfish_kd = pd.Series([0.05, 0.03, 0.02], dtype = 'float')
kabam_empty.lfish_ke = pd.Series([0.05, 0.02, 0.02], dtype = 'float')
kabam_empty.lfish_kg = pd.Series([0.1, 0.01, 0.003], dtype = 'float')
kabam_empty.lfish_km = pd.Series([0.0, 0.1, 0.5], dtype = 'float')
kabam_empty.total_diet_conc_lfish = pd.Series( [.20, .30, .50], dtype = 'float')
result = kabam_empty.pest_conc_diet_uptake(kabam_empty.lfish_kd, kabam_empty.lfish_k2,
kabam_empty.lfish_ke, kabam_empty.lfish_kg,
kabam_empty.lfish_km,
kabam_empty.total_diet_conc_lfish)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_pest_conc_respir_uptake(self):
"""
:description Pesticide concentration in animal/organism originating from uptake through respiration
:unit g/kg ww
:expression Kabam A1 (with kD = 0)
:param lfish_k1: pesticide uptake rate constant through respiratory area (gills, skin) (L/kg-d)
        :param lfish_k2: rate constant for elimination of the pesticide through the respiratory area (gills, skin) (/d)
:param lfish_kE: rate constant for elimination of the pesticide through excretion of feces (/d)
:param lfish_kG: animal/organism growth rate constant (/d)
:param lfish_kM: rate constant for pesticide metabolic transformation (/d)
        :param lfish_mP: fraction of respiratory ventilation that involves pore-water of sediment (fraction)
:param lfish_mO: fraction of respiratory ventilation that involves overlying water; 1-mP (fraction)
:param phi: fraction of the overlying water pesticide concentration that is freely dissolved and can be absorbed
via membrane diffusion (fraction)
        :param water_column_eec: total pesticide concentration in water column above sediment (g/L)
        :param pore_water_eec: freely dissolved pesticide concentration in pore-water of sediment (g/L)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([9.8522167e-4, 9.746588e-5, 1.1353959e-3], dtype = 'float')
try:
kabam_empty.phi = pd.Series([1.0, 1.0, 1.0], dtype = 'float')
kabam_empty.water_column_eec = pd.Series([1.e-3, 1.e-4, 2.e-3], dtype = 'float')
kabam_empty.pore_water_eec = pd.Series([1.e-4, 1.e-5, 2.e-3], dtype = 'float')
            #for this test we'll use the large fish variables (and values that may not specifically apply to large fish)
kabam_empty.lfish_k1 = pd.Series([10., 5., 2.], dtype = 'float')
kabam_empty.lfish_k2 = pd.Series( [10., 5., 3.], dtype = 'float')
kabam_empty.lfish_ke = pd.Series([0.05, 0.02, 0.02], dtype = 'float')
kabam_empty.lfish_kg = pd.Series([0.1, 0.01, 0.003], dtype = 'float')
kabam_empty.lfish_km = pd.Series([0.0, 0.1, 0.5], dtype = 'float')
kabam_empty.lfish_mp = pd.Series([0.0, 0.0, 0.05], dtype = 'float')
kabam_empty.lfish_mo = pd.Series([1.0, 1.0, 0.95], dtype = 'float')
result = kabam_empty.pest_conc_respir_uptake(kabam_empty.lfish_k1, kabam_empty.lfish_k2,
kabam_empty.lfish_ke, kabam_empty.lfish_kg,
kabam_empty.lfish_km, kabam_empty.lfish_mp,
kabam_empty.lfish_mo)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_tot_bioconc_fact(self):
"""
:description Total bioconcentration factor
:unit (ug pesticide/kg ww) / (ug pesticide/L water)
:expression Kabam Eq. F1
:param k1: pesticide uptake rate constant through respiratory area (gills, skin) (L/kg-d)
        :param k2: rate constant for elimination of the pesticide through the respiratory area (gills, skin) (/d)
        :param mP: fraction of respiratory ventilation that involves pore-water of sediment (fraction)
:param mO: fraction of respiratory ventilation that involves overlying water; 1-mP (fraction)
:param phi: fraction of the overlying water pesticide concentration that is freely dissolved and can be absorbed
via membrane diffusion (fraction)
:param water_column_eec: total pesticide concentration in water column above sediment (g/L)
:param pore_water_eec: freely dissolved pesticide concentration in pore-water of sediment (g/L)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.955, 1.00, 0.6666667], dtype = 'float')
try:
kabam_empty.phi = pd.Series([1.0, 1.0, 1.0], dtype = 'float')
kabam_empty.water_column_eec = pd.Series([1.e-3, 1.e-4, 2.e-3], dtype = 'float')
kabam_empty.pore_water_eec = pd.Series([1.e-4, 1.e-5, 2.e-3], dtype = 'float')
#for this test we'll use the large fish variables (and values that may not specifically apply to large fish)
kabam_empty.lfish_k1 = | pd.Series([10., 5., 2.], dtype = 'float') | pandas.Series |
# coding: utf-8
import math
import os
import csv
import copy
import time
import statistics
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from logging import getLogger
from functools import wraps
from tqdm import tqdm
from scipy.stats import kde
import warnings
import indicator_utils
tqdm.pandas()
logger = getLogger("__main__").getChild("indicator_evaluation")
warnings.filterwarnings('ignore')
class CalcIndicator(object):
def extract_correspond_point(self, tra_point, eval_point, sec_limit=1.0):
'''
extract correspond point from tra_point
Parameters
----------
tra_point : DataFrame
columns = ['unixtime', 'x_position_m', 'y_position_m']
eval_point : DataFrame
evaluation points, columns = ['unixtime', 'x_position_m', 'y_position_m']
sec_limit : float
match time limit [sec]
Returns
-------
correspond_df : DataFrame
columns = ['unixtime', 'tra_x', 'tra_y', 'eval_x', 'eval_y', 'correspond_time']
'''
# Calculate euclidean distance
unixtime_list = []
tra_x_list = []
tra_y_list = []
eval_x_list = []
eval_y_list = []
correspond_time_list = []
def Calc_correspond(row):
try:
diff_abs = np.abs(np.full(len(tra_point), row['unixtime']) - tra_point['unixtime'])
min_index = diff_abs.argmin()
if diff_abs[min_index] <= sec_limit:
unixtime_list.append(row['unixtime'])
tra_x_list.append(tra_point['x_position_m'][min_index])
tra_y_list.append(tra_point['y_position_m'][min_index])
eval_x_list.append(row['x_position_m'])
eval_y_list.append(row['y_position_m'])
correspond_time_list.append(diff_abs[min_index])
else: #no match
logger.debug('warning : no match traj_point and eval_point at unixtime {}'.format(row['unixtime']))
except ValueError:
logger.debug('warning : value error occurred at unixtime {}'.format(row['unixtime']))
eval_point.apply(Calc_correspond, axis=1).values
correspond_df = pd.DataFrame({'unixtime' : unixtime_list,
'tra_x' : tra_x_list,
'tra_y' : tra_y_list,
'eval_x' : eval_x_list,
'eval_y' : eval_y_list,
'correspond_time' : correspond_time_list})
return correspond_df
def CE_calculation(self, tra_point, eval_point_ALAP):
'''
Calculate Circular Error (CE)
Parameters
----------
tra_point : DataFrame
columns = ['unixtime', 'x_position_m', 'y_position_m']
eval_point_ALAP : DataFrame
evaluation points in ALAP, columns = ['unixtime', 'x_position_m', 'y_position_m']
Returns
-------
CE_df : DataFrame
columns = ['unixtime', 'tra_x', 'tra_y', 'eval_x', 'eval_y', 'correspond_time', 'CE']
'''
logger.debug('Calculate Circular Error (CE) START')
correspond_df = self.extract_correspond_point(tra_point, eval_point_ALAP)
def Calc_CE(row):
error_m_value = math.hypot(row['tra_x'] - row['eval_x'], row['tra_y'] - row['eval_y'])
return error_m_value
correspond_df['CE'] = correspond_df.apply(Calc_CE, axis=1)
logger.debug('Calculate Circular Error(CE) END')
return correspond_df
def EAG_calculation(self, tra_point, ref_point, eval_point_ALIP):
'''
Calculate Error Accumulation Gradient (EAG)
Parameters
----------
tra_point : DataFrame
columns = ['unixtime', 'x_position_m', 'y_position_m']
ref_point : DataFrame
ground-truth point data for evaluation
eval_point_ALIP : DataFrame
evaluation points in ALIP, columns = ['unixtime', 'x_position_m', 'y_position_m']
Returns
-------
EAG_df : DataFrame
columns = ['unixtime', 'tra_x', 'tra_y', 'eval_x', 'eval_y', 'correspond_time', 'EAG', 'delta_t']
'''
logger.debug('Calculate Error Accumulation Gradient (EAG) START')
# Calculate unixtime absolute error between reference point and evaluation point in ALIP
def unixtime_delta_min(x):
delta_t_list = abs(np.full(len(ref_point), x) - ref_point['unixtime'])
delta_t_min = min(delta_t_list)
return delta_t_min
correspond_df = self.extract_correspond_point(tra_point, eval_point_ALIP)
eval_point_delta_t = correspond_df['unixtime'].apply(lambda x : unixtime_delta_min(x))
correspond_df.reset_index(drop=True, inplace=True)
# Avoid pandas SettingWithCopyWarning
correspond_df = correspond_df.copy()
correspond_df['delta_t'] = eval_point_delta_t.values
def Calc_EAG(row):
error_m_value = math.hypot(row['tra_x'] - row['eval_x'], row['tra_y'] - row['eval_y']) / row['delta_t']
return error_m_value
correspond_df['EAG'] = correspond_df.apply(Calc_EAG, axis=1)
logger.debug('Calculate Error Accumulation Gradient (EAG) END')
return correspond_df
def CP_calculation(self, tra_point, eval_point, band_width=None):
'''
Calculate Circular Precision (CP)
Parameters
----------
tra_point : DataFrame
columns = ['unixtime', 'x_position_m', 'y_position_m']
eval_point : DataFrame
ground-truth point data for evaluation
Returns
-------
CP_df : DataFrame
columns = ['unixtime', 'CP', 'correspond_time']
'''
logger.debug('Calculate Circular Precision (CP) START')
correspond_df = self.extract_correspond_point(tra_point, eval_point)
error_xy_series = self.calc_error_dist(tra_point, eval_point)
xi, yi, zi = self.calc_kernel_density(error_xy_series['x_error'].to_list(),
error_xy_series['y_error'].to_list(),
bw_method=band_width)
x_mod, y_mod = self.calc_density_mode(xi, yi, zi)
def Calc_CP(row):
error_x = row['tra_x'] - row['eval_x']
error_y = row['tra_y'] - row['eval_y']
error_dist_value = math.hypot(error_x - x_mod, error_y - y_mod)
return error_dist_value
correspond_df['CP'] = correspond_df.apply(Calc_CP, axis=1)
logger.debug('Calculate Circular Precision (CP) END')
return correspond_df
def calc_error_dist(self, tra_point, eval_point):
correspond_df = self.extract_correspond_point(tra_point, eval_point)
def error_m_xy(row):
x_error, y_error = row['tra_x'] - row['eval_x'], row['tra_y'] - row['eval_y']
return pd.Series([x_error, y_error])
result = | pd.DataFrame({'x_error':[], 'y_error':[]}) | pandas.DataFrame |
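# A minimal standalone sketch (not used by the class above): the nearest-unixtime
# matching performed by extract_correspond_point can be approximated with
# pandas.merge_asof, assuming both frames are sorted by 'unixtime' and the key and
# sec_limit share a numeric dtype/unit. Function and column names are assumptions.
def nearest_match_sketch(tra_point, eval_point, sec_limit=1.0):
    import pandas as pd
    tra = tra_point.sort_values('unixtime').rename(
        columns={'x_position_m': 'tra_x', 'y_position_m': 'tra_y'})
    ev = eval_point.sort_values('unixtime').rename(
        columns={'x_position_m': 'eval_x', 'y_position_m': 'eval_y'})
    # direction='nearest' picks the closest trajectory timestamp;
    # rows farther away than sec_limit stay unmatched (NaN) and are dropped
    matched = pd.merge_asof(ev, tra, on='unixtime',
                            direction='nearest', tolerance=sec_limit)
    return matched.dropna(subset=['tra_x', 'tra_y'])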
from . import r_function
import pandas as pd
_install_dyngen = r_function.RFunction(
args="""lib=.libPaths()[1], dependencies=NA,
repos='http://cran.rstudio.com', verbose=TRUE""",
body="""
install.packages(c("dynwrap", "dyngen"),
lib=lib,
repos=repos,
dependencies=dependencies)
""",
)
_get_backbones = r_function.RFunction(
setup="""
library(dyngen)
""",
body="""
names(list_backbones())
""",
)
_DyngenSimulate = r_function.RFunction(
args="""
backbone_name=character(), num_cells=500, num_tfs=100, num_targets=50,
num_hks=25,simulation_census_interval=10, compute_cellwise_grn=FALSE,
compute_rna_velocity=FALSE, n_jobs=7, random_state=NA, verbose=TRUE
""",
setup="""
library(dyngen)
""",
body="""
if (!is.na(random_state)) {
set.seed(random_state)
}
backbones <- list('bifurcating'=backbone_bifurcating(),
'bifurcating_converging'=backbone_bifurcating_converging(),
'bifurcating_cycle'=backbone_bifurcating_cycle(),
'bifurcating_loop'=backbone_bifurcating_loop(),
'binary_tree'=backbone_binary_tree(),
'branching'=backbone_branching(),
'consecutive_bifurcating'=backbone_consecutive_bifurcating(),
'converging'=backbone_converging(),
'cycle'=backbone_cycle(),
'cycle_simple'=backbone_cycle_simple(),
'disconnected'=backbone_disconnected(),
'linear'=backbone_linear(),
'linear_simple'=backbone_linear_simple(),
'trifurcating'=backbone_trifurcating()
)
backbone <- backbones[[backbone_name]]
# silently apply dyngen's default behavior (num_tfs is raised to the backbone minimum)
if (num_tfs < nrow(backbone$module_info)) {
if (verbose) {
cat("If input num_tfs is less than backbone default,",
"Dyngen uses backbone default.\n")
}
num_tfs <- nrow(backbone$module_info)
}
if (verbose) {
cat('Run Parameters:')
cat('\n\tBackbone:', backbone_name)
cat('\n\tNumber of Cells:', num_cells)
cat('\n\tNumber of TFs:', num_tfs)
cat('\n\tNumber of Targets:', num_targets)
cat('\n\tNumber of HKs:', num_hks, '\n')
}
init <- initialise_model(
backbone=backbone,
num_cells=num_cells,
num_tfs=num_tfs,
num_targets=num_targets,
num_hks=num_hks,
simulation_params=simulation_default(
census_interval=as.double(simulation_census_interval),
kinetics_noise_function = kinetics_noise_simple(mean=1, sd=0.005),
ssa_algorithm = ssa_etl(tau=300/3600),
compute_cellwise_grn=compute_cellwise_grn,
compute_rna_velocity=compute_rna_velocity),
num_cores = n_jobs,
download_cache_dir=NULL,
verbose=verbose
)
out <- generate_dataset(init)
data <- list(cell_info = as.data.frame(out$dataset$cell_info),
expression = as.data.frame(as.matrix(out$dataset$expression)))
if (compute_cellwise_grn) {
data[['bulk_grn']] <- as.data.frame(out$dataset$regulatory_network)
data[['cellwise_grn']] <- as.data.frame(out$dataset$regulatory_network_sc)
}
if (compute_rna_velocity) {
data[['rna_velocity']] <- as.data.frame(as.matrix(out$dataset$rna_velocity))
}
data
""",
)
def install(
lib=None,
dependencies=None,
repos="http://cran.us.r-project.org",
verbose=True,
):
"""Install Dyngen from CRAN.
Parameters
----------
lib: string
Directory to install the package.
If missing, defaults to the first element of .libPaths().
dependencies: boolean, optional (default: None/NA)
When True, installs all packages specified under "Depends", "Imports",
"LinkingTo" and "Suggests".
When False, installs no dependencies.
When None/NA, installs all packages specified under "Depends", "Imports"
and "LinkingTo".
repos: string, optional (default: "http://cran.us.r-project.org"):
R package repository.
verbose: boolean, optional (default: True)
Install script verbosity.
"""
kwargs = {}
if lib is not None:
kwargs["lib"] = lib
if dependencies is not None:
kwargs["dependencies"] = dependencies
_install_dyngen(
repos=repos,
verbose=verbose,
**kwargs,
)
def get_backbones():
"""Output full list of cell trajectory backbones.
Returns
-------
backbones: array of backbone names
"""
return _get_backbones()
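# Minimal usage sketch, mirroring the example in the DyngenSimulate docstring below;
# it assumes R, rpy2 and the dyngen package are installed and reachable. The helper
# name and the num_cells value are illustrative, not part of the package API.
def _example_dyngen_run():
    install()
    backbones = get_backbones()
    # simulate a small dataset on the first available backbone
    return DyngenSimulate(backbone=backbones[0], num_cells=100, verbose=False)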
def DyngenSimulate(
backbone,
num_cells=500,
num_tfs=100,
num_targets=50,
num_hks=25,
simulation_census_interval=10,
compute_cellwise_grn=False,
compute_rna_velocity=False,
n_jobs=7,
random_state=None,
verbose=True,
force_num_cells=False,
):
"""Simulate dataset with cellular backbone.
The backbone determines the overall dynamic process during a simulation.
It consists of a set of gene modules, which regulate each other such that
expression of certain genes change over time in a specific manner.
DyngenSimulate is a Python wrapper for the R package Dyngen.
Default values obtained from Github vignettes.
For more details, read about Dyngen on Github_.
.. _Github: https://github.com/dynverse/dyngen
Parameters
----------
backbone: string
Backbone name from dyngen list of backbones.
Get list with get_backbones()).
num_cells: int, optional (default: 500)
Number of cells.
num_tfs: int, optional (default: 100)
Number of transcription factors.
The TFs are the main drivers of the molecular changes in the simulation.
A TF can only be regulated by other TFs or itself.
NOTE: If num_tfs input is less than nrow(backbone$module_info),
Dyngen will default to nrow(backbone$module_info).
This quantity varies between backbones and with each run (without seed).
It is generally less than 75.
It is recommended to input num_tfs >= 100 to stabilize the output.
num_targets: int, optional (default: 50)
Number of target genes.
Target genes are regulated by a TF or another target gene,
but are always downstream of at least one TF.
num_hks: int, optional (default: 25)
Number of housekeeping genes.
Housekeeping genes are completely separate from any TFs or target genes.
simulation_census_interval: int, optional (default: 10)
Stores the abundance levels only after a specific interval has passed.
The lower the interval, the more detail of the simulation trajectory is retained,
though many timepoints will contain similar information.
compute_cellwise_grn: boolean, optional (default: False)
If True, computes the ground truth cellwise gene regulatory networks.
Also outputs ground truth bulk (entire dataset) regulatory network.
NOTE: Increases compute time significantly.
compute_rna_velocity: boolean, optional (default: False)
If true, computes the ground truth propensity ratios after simulation.
NOTE: Increases compute time significantly.
n_jobs: int, optional (default: 7)
Number of cores to use.
random_state: int, optional (default: None)
Fixes seed for simulation generator.
verbose: boolean, optional (default: True)
Data generation verbosity.
force_num_cells: boolean, optional (default: False)
Dyngen occasionally produces fewer cells than specified.
Set this flag to True to rerun Dyngen until correct cell count is reached.
Returns
-------
Dictionary data of pd.DataFrames:
data['cell_info']: pd.DataFrame, shape (n_cells, 4)
Columns: cell_id, step_ix, simulation_i, sim_time
sim_time is the simulated timepoint for a given cell.
data['expression']: pd.DataFrame, shape (n_cells, n_genes)
Log-transformed counts with dropout.
If compute_cellwise_grn is True,
data['bulk_grn']: pd.DataFrame, shape (n_tf_target_interactions, 4)
Columns: regulator, target, strength, effect.
Strength is positive and unbounded.
Effect is either +1 (for activation) or -1 (for inhibition).
data['cellwise_grn']: pd.DataFrame, shape (n_tf_target_interactions_per_cell, 4)
Columns: cell_id, regulator, target, strength.
The output does not include all edges per cell.
The regulatory effect lies between [−1, 1], where -1 is complete inhibition
of target by TF, +1 is maximal activation of target by TF,
and 0 is inactivity of the regulatory interaction between R and T.
If compute_rna_velocity is True,
data['rna_velocity']: pd.DataFrame, shape (n_cells, n_genes)
Propensity ratios for each cell.
Example
--------
>>> import scprep
>>> scprep.run.dyngen.install()
>>> backbones = scprep.run.dyngen.get_backbones()
>>> data = scprep.run.DyngenSimulate(backbone=backbones[0])
"""
if backbone not in get_backbones():
raise ValueError(
(
"Input not in default backbone list. "
"Choose backbone from get_backbones()"
)
)
kwargs = {}
if random_state is not None:
kwargs["random_state"] = random_state
rdata = _DyngenSimulate(
backbone_name=backbone,
num_cells=num_cells,
num_tfs=num_tfs,
num_targets=num_targets,
num_hks=num_hks,
simulation_census_interval=simulation_census_interval,
compute_cellwise_grn=compute_cellwise_grn,
compute_rna_velocity=compute_rna_velocity,
n_jobs=n_jobs,
verbose=verbose,
rpy_verbose=verbose,
**kwargs,
)
if force_num_cells:
if random_state is None:
random_state = -1
if pd.DataFrame(rdata["cell_info"]).shape[0] != num_cells:
random_state += 1
rdata = DyngenSimulate(
backbone=backbone,
num_cells=num_cells,
num_tfs=num_tfs,
num_targets=num_targets,
num_hks=num_hks,
simulation_census_interval=simulation_census_interval,
compute_cellwise_grn=compute_cellwise_grn,
compute_rna_velocity=compute_rna_velocity,
n_jobs=n_jobs,
verbose=verbose,
random_state=random_state,
force_num_cells=force_num_cells,
)
data = {}
data["cell_info"] = | pd.DataFrame(rdata["cell_info"]) | pandas.DataFrame |
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import IncompatibleFrequency
import pandas as pd
from pandas import Series, date_range
import pandas._testing as tm
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic:
@pytest.mark.parametrize(
"ts",
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True),
],
)
@pytest.mark.parametrize(
"opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]
)
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename("ts")
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
op = getattr(Series, opname)
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
| tm.assert_almost_equal(result, expected) | pandas._testing.assert_almost_equal |
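# A tiny, hedged illustration of the equivalence exercised above: the flex method
# Series.add should agree with operator.add / the ``+`` operator. The helper name
# is illustrative only.
def _example_flex_equivalence():
    import operator
    import pandas as pd

    s = pd.Series([1.0, 2.0, 3.0])
    other = s * 2
    assert s.add(other).equals(operator.add(s, other))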
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 7 09:40:49 2018
@author: yuwei
"""
import pandas as pd
import numpy as np
import math
import random
import time
import scipy as sp
import xgboost as xgb
def loadData():
"下载数据"
trainSet = pd.read_table('round1_ijcai_18_train_20180301.txt',sep=' ')
testSet = pd.read_table('round1_ijcai_18_test_a_20180301.txt',sep=' ')
return trainSet,testSet
def splitData(trainSet,testSet):
"按时间划分验证集"
#convert test-set timestamps to standard datetime strings
time_local = testSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
testSet['context_timestamp'] = time_local
#convert training-set timestamps to standard datetime strings
time_local = trainSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
trainSet['context_timestamp'] = time_local
del time_local
#process the item_category_list attribute of the training set
trainSet['item_category_list'] = trainSet.item_category_list.map(lambda x :x.split(';'))
trainSet['item_category_list_2'] = trainSet.item_category_list.map(lambda x :x[1])
trainSet['item_category_list_3'] = trainSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
trainSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,trainSet['item_category_list_2'],trainSet['item_category_list_3']))
#process the item_category_list attribute of the test set
testSet['item_category_list'] = testSet.item_category_list.map(lambda x :x.split(';'))
testSet['item_category_list_2'] = testSet.item_category_list.map(lambda x :x[1])
testSet['item_category_list_3'] = testSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
testSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,testSet['item_category_list_2'],testSet['item_category_list_3']))
del trainSet['item_category_list_3'];del testSet['item_category_list_3'];
#compute the rank of the item category within predict_category_property
trainSet['predict_category'] = trainSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
trainSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,trainSet['item_category_list_2'],trainSet['predict_category']))
testSet['predict_category'] = testSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
testSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,testSet['item_category_list_2'],testSet['predict_category']))
#count the categories shared between item_category_list and predict_category
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
#count the categories in item_category_list that are not shared
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
del trainSet['predict_category']; del testSet['predict_category']
"划分数据集"
#test set: features from the 23rd-24th, labels on the 25th
test = testSet
testFeat = trainSet[trainSet['context_timestamp']>'2018-09-23']
#validation set: features from the 22nd-23rd, labels on the 24th
validate = trainSet[trainSet['context_timestamp']>'2018-09-24']
validateFeat = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-24')]
#training set: features from the 21st-22nd with labels on the 23rd; 20th-21st with labels on the 22nd; 19th-20th with labels on the 21st; 18th-19th with labels on the 20th
#label windows
train1 = trainSet[(trainSet['context_timestamp']>'2018-09-23') & (trainSet['context_timestamp']<'2018-09-24')]
train2 = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-23')]
train3 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-22')]
train4 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-21')]
#feature windows
trainFeat1 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-23')]
trainFeat2 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-22')]
trainFeat3 = trainSet[(trainSet['context_timestamp']>'2018-09-19') & (trainSet['context_timestamp']<'2018-09-21')]
trainFeat4 = trainSet[(trainSet['context_timestamp']>'2018-09-18') & (trainSet['context_timestamp']<'2018-09-20')]
return test,testFeat,validate,validateFeat,train1,trainFeat1,train2,trainFeat2,train3,trainFeat3,train4,trainFeat4
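# Summary of the sliding-window scheme hard-coded in splitData above, restating the
# date literals used in the function (feature-window start, exclusive feature-window
# end, label day). The constant name is illustrative and not used elsewhere.
TRAIN_WINDOWS = [
    ("2018-09-21", "2018-09-23", "2018-09-23"),
    ("2018-09-20", "2018-09-22", "2018-09-22"),
    ("2018-09-19", "2018-09-21", "2018-09-21"),
    ("2018-09-18", "2018-09-20", "2018-09-20"),
]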
def modelXgb(train,test):
"xgb模型"
train_y = train['is_trade'].values
# train_x = train.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property'
# ],axis=1).values
#drop attributes whose Pearson correlation coefficient is below -0.2
train_x = train.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property','is_trade',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first'
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service',
],axis=1).values
# test_x = test.drop(['item_brand_id',
# 'item_city_id','user_id','shop_id','context_id',
# 'instance_id', 'item_id','item_category_list',
# 'item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade',
# 'item_price_level','user_rank_down',
# 'item_category_list_2_not_buy_count',
# 'item_category_list_2_count',
# 'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
# ],axis=1).values
test_x = test.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
],axis=1).values
dtrain = xgb.DMatrix(train_x, label=train_y)
dtest = xgb.DMatrix(test_x)
# model parameters
params = {'booster': 'gbtree',
'objective':'binary:logistic',
'eval_metric':'logloss',
'eta': 0.03,
'max_depth': 5, # 6
'colsample_bytree': 0.8,#0.8
'subsample': 0.8,
'scale_pos_weight': 1,
'min_child_weight': 18 # 2
}
# train the model
watchlist = [(dtrain,'train')]
bst = xgb.train(params, dtrain, num_boost_round=700,evals=watchlist)
# predict
predict = bst.predict(dtest)
# test_xy = test[['instance_id','is_trade']]
test_xy = test[['instance_id']]
test_xy['predicted_score'] = predict
return test_xy
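# Hedged, self-contained sketch of the same xgboost flow on synthetic data; the real
# feature matrix, dropped columns and parameter values are the ones in modelXgb above.
def _example_xgb_flow():
    import numpy as np
    import xgboost as xgb
    rng = np.random.RandomState(0)
    train_x, train_y = rng.rand(200, 5), rng.randint(0, 2, 200)
    dtrain = xgb.DMatrix(train_x, label=train_y)
    params = {'booster': 'gbtree', 'objective': 'binary:logistic',
              'eval_metric': 'logloss', 'eta': 0.03, 'max_depth': 5}
    bst = xgb.train(params, dtrain, num_boost_round=50)
    return bst.predict(xgb.DMatrix(rng.rand(10, 5)))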
def get_item_feat(data,dataFeat):
"item的特征提取"
result = pd.DataFrame(dataFeat['item_id'])
result = result.drop_duplicates(['item_id'],keep='first')
"1.统计item出现次数"
dataFeat['item_count'] = dataFeat['item_id']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_count',aggfunc='count').reset_index()
del dataFeat['item_count']
result = pd.merge(result,feat,on=['item_id'],how='left')
"2.统计item历史被购买的次数"
dataFeat['item_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_buy_count']
result = pd.merge(result,feat,on=['item_id'],how='left')
"3.统计item转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_buy_count,result.item_count))
result['item_buy_ratio'] = buy_ratio
"4.统计item历史未被够买的次数"
result['item_not_buy_count'] = result['item_count'] - result['item_buy_count']
return result
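# Toy illustration (hypothetical data) of the count / sum / ratio pattern that the
# get_*_feat extractors in this file all share; pivot_table with aggfunc='count' or
# 'sum' is equivalent to the groupby aggregation shown here.
def _example_conversion_ratio():
    toy = pd.DataFrame({'item_id': [1, 1, 2], 'is_trade': [1, 0, 1]})
    feat = toy.groupby('item_id')['is_trade'].agg(['count', 'sum']).reset_index()
    feat['buy_ratio'] = feat['sum'] / feat['count']
    feat['not_buy_count'] = feat['count'] - feat['sum']
    return feat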
def get_user_feat(data,dataFeat):
"user的特征提取"
result = pd.DataFrame(dataFeat['user_id'])
result = result.drop_duplicates(['user_id'],keep='first')
"1.统计user出现次数"
dataFeat['user_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_count',aggfunc='count').reset_index()
del dataFeat['user_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"2.统计user历史被购买的次数"
dataFeat['user_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_buy_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"3.统计user转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_buy_count,result.user_count))
result['user_buy_ratio'] = buy_ratio
"4.统计user历史未被够买的次数"
result['user_not_buy_count'] = result['user_count'] - result['user_buy_count']
return result
def get_context_feat(data,dataFeat):
"context的特征提取"
result = pd.DataFrame(dataFeat['context_id'])
result = result.drop_duplicates(['context_id'],keep='first')
"1.统计context出现次数"
dataFeat['context_count'] = dataFeat['context_id']
feat = pd.pivot_table(dataFeat,index=['context_id'],values='context_count',aggfunc='count').reset_index()
del dataFeat['context_count']
result = pd.merge(result,feat,on=['context_id'],how='left')
"2.统计context历史被购买的次数"
dataFeat['context_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_id'],values='context_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_buy_count']
result = pd.merge(result,feat,on=['context_id'],how='left')
"3.统计context转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_buy_count,result.context_count))
result['context_buy_ratio'] = buy_ratio
"4.统计context历史未被够买的次数"
result['context_not_buy_count'] = result['context_count'] - result['context_buy_count']
return result
def get_shop_feat(data,dataFeat):
"shop的特征提取"
result = pd.DataFrame(dataFeat['shop_id'])
result = result.drop_duplicates(['shop_id'],keep='first')
"1.统计shop出现次数"
dataFeat['shop_count'] = dataFeat['shop_id']
feat = pd.pivot_table(dataFeat,index=['shop_id'],values='shop_count',aggfunc='count').reset_index()
del dataFeat['shop_count']
result = pd.merge(result,feat,on=['shop_id'],how='left')
"2.统计shop历史被购买的次数"
dataFeat['shop_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['shop_id'],values='shop_buy_count',aggfunc='sum').reset_index()
del dataFeat['shop_buy_count']
result = pd.merge(result,feat,on=['shop_id'],how='left')
"3.统计shop转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.shop_buy_count,result.shop_count))
result['shop_buy_ratio'] = buy_ratio
"4.统计shop历史未被够买的次数"
result['shop_not_buy_count'] = result['shop_count'] - result['shop_buy_count']
return result
def get_timestamp_feat(data,dataFeat):
"context_timestamp的特征提取"
result = pd.DataFrame(dataFeat['context_timestamp'])
result = result.drop_duplicates(['context_timestamp'],keep='first')
"1.统计context_timestamp出现次数"
dataFeat['context_timestamp_count'] = dataFeat['context_timestamp']
feat = pd.pivot_table(dataFeat,index=['context_timestamp'],values='context_timestamp_count',aggfunc='count').reset_index()
del dataFeat['context_timestamp_count']
result = pd.merge(result,feat,on=['context_timestamp'],how='left')
"2.统计context_timestamp历史被购买的次数"
dataFeat['context_timestamp_buy_count'] = dataFeat['is_trade']
feat = | pd.pivot_table(dataFeat,index=['context_timestamp'],values='context_timestamp_buy_count',aggfunc='sum') | pandas.pivot_table |
# author: <NAME>, <NAME>, <NAME>, <NAME>
# date: 2020-06-02
"""
This script cleans the census dataset for a given year and saves them to
the file_path provided. This script takes the census year and the csv file
containing the census data as arguments.
Usage: src/02_clean_wrangle/05_clean_census.py --census_file=<census_file> \
--year=<year> \
--file_path=<file_path>
Options:
--census_file=<census_file> csv file containing census data,
including file path.
--year=<year> census year.
--file_path=<file_path> Path to the exported files folder.
"""
from docopt import docopt
import pandas as pd
import os
import re
import warnings
pd.options.mode.chained_assignment = None
warnings.filterwarnings("ignore")
opt = docopt(__doc__)
def create_subgroup_dict(df, year):
# separate dataframe by 'Variables' containing regex expressions:
if year == 2001:
re1 = ['total.*by', 'population.*by', 'common-law couples',
'^Male', '^Female', 'total - male', 'total - female']
elif year == 2006:
re1 = [r'total.*by', r'population.*by', r'common-law couples',\
r'^Male[s\s,]', r'^Female[s\s,]', r'total - mobility',\
r'Average number of children']
elif year == 2011:
df.drop(index=201, inplace=True)
re1 = ['total.*by', 'population.*by', 'common-law couples',
'males', 'Total population excluding institutional residents',
'Total.*in private households']
elif year == 2016:
re1 = ['^total', 'population.*by', 'males']
subgroup = list(df[df.Variable.str.contains('|'.join(re1),
flags=re.IGNORECASE)].index)
subgroup.append(len(df.Variable)+1)
subgroup = subgroup[1:]
# create census dictionary of sub datasets
# initialize variables for the lookup dictionary
start = 0
census_dict = {}
for s in subgroup:
sub_df = df.loc[start:s-1]
# transpose dataframe and rename column
sub_df = sub_df.set_index('Variable').T.reset_index()
sub_df = sub_df.rename(columns={'index': 'LocalArea'})
# check for duplicates and store dataframes into the dictionary
if df.Variable[start] in census_dict:
start = s
else:
census_dict[df.Variable[start]] = sub_df
start = s
return census_dict
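# Hedged sketch of the expected input shape: a 'Variable' column whose header-like
# rows (e.g. "Total ...") open a new sub-table, with one column per LocalArea. The
# toy frame and helper name are hypothetical and only show the keying scheme.
def _example_subgroup_split():
    toy = pd.DataFrame({
        'Variable': ['Total population by age groups', '0 to 4 years',
                     '5 to 9 years'],
        'Downtown': [100, 10, 12],
        'Kitsilano': [80, 8, 9],
    })
    # returns {'Total population by age groups': <transposed sub-table per area>}
    return create_subgroup_dict(toy, 2016)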
###########################################################################
# HELPER FUNCTIONS
###########################################################################
def clean_age(census_dict, year, file_path):
if year == 2001:
col_names = ['LocalArea', 'Type', 'Total', '0 to 4 years',
'5 to 9 years', '10 to 14 years', '15 to 19 years',
'20 to 24 years', '25 to 29 years', '30 to 34 years',
'35 to 39 years', '40 to 44 years', '45 to 49 years',
'50 to 54 years', '55 to 59 years', '60 to 64 years',
'65 to 69 years', '70 to 74 years', '75 to 79 years',
'80 to 84 years', '85 to 89 years', '90 to 94 years',
'95 to 99 years', '100 years and over']
male = census_dict['Male']
female = census_dict['Female']
female.insert(1, 'Type', 'female')
female.set_axis(col_names, axis=1, inplace=True)
male.insert(1, 'Type', 'male')
male.set_axis(col_names, axis=1, inplace=True)
merged = pd.concat([female, male])
merged.sort_values(by=['LocalArea', 'Type'], inplace=True)
total = merged.groupby('LocalArea').sum()
total['Type'] = 'total'
total.reset_index(inplace=True)
merged = pd.concat([merged, total])
else:
if year == 2006:
col_names = ['LocalArea', 'Type', 'Total', '0 to 4 years',
'5 to 9 years', '10 to 14 years', '15 to 19 years',
'20 to 24 years', '25 to 29 years', '30 to 34 years',
'35 to 39 years', '40 to 44 years', '45 to 49 years',
'50 to 54 years', '55 to 59 years', '60 to 64 years',
'65 to 69 years', '70 to 74 years', '75 to 79 years',
'80 to 84 years', '85 to 89 years', '90 to 94 years',
'95 to 99 years', '100 years and over', 'Median Age']
total = census_dict['Male & Female, Total']
male = census_dict['Male, Total']
female = census_dict['Female, Total']
elif year == 2011:
col_names = ['LocalArea', 'Type', 'Total', '0 to 4 years',
'5 to 9 years', '10 to 14 years', '15 to 19 years',
'15 years', '16 years', '17 years', '18 years',
'19 years', '20 to 24 years', '25 to 29 years',
'30 to 34 years', '35 to 39 years', '40 to 44 years',
'45 to 49 years', '50 to 54 years', '55 to 59 years',
'60 to 64 years', '65 to 69 years', '70 to 74 years',
'75 to 79 years', '80 to 84 years',
'85 years and over', 'Median age',
'% of the population aged 15 and over']
total = census_dict['Total population by age groups']
male = census_dict['Males, total']
female = census_dict['Females, total']
elif year == 2016:
col_names = ['LocalArea', 'Type', 'Total', '0 to 14 years',
'0 to 4 years', '5 to 9 years', '10 to 14 years',
'15 to 64 years', '15 to 19 years',
'20 to 24 years', '25 to 29 years',
'30 to 34 years', '35 to 39 years',
'40 to 44 years', '45 to 49 years',
'50 to 54 years', '55 to 59 years',
'60 to 64 years', '65 years and over',
'65 to 69 years', '70 to 74 years',
'75 to 79 years', '80 to 84 years',
'85 years and over', '85 to 89 years',
'90 to 94 years', '95 to 99 years',
'100 years and over']
total = census_dict['Total - Age groups and average age of the population - 100% data']
male = census_dict['Total - Age groups and average age of males - 100% data']
female = census_dict['Total - Age groups and average age of females - 100% data']
female.insert(1, 'Type', 'female')
female.set_axis(col_names, axis=1, inplace=True)
male.insert(1, 'Type', 'male')
male.set_axis(col_names, axis=1, inplace=True)
total.insert(1, 'Type', 'total')
total.set_axis(col_names, axis=1, inplace=True)
merged = pd.concat([female, male, total])
merged.sort_values(by=['LocalArea', 'Type'], inplace=True)
census_dict['population by age and sex'] = merged
merged.to_csv(file_path + '/population_age_sex.csv')
return census_dict
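# Hedged toy example of the concat / groupby pattern shared by the cleaners in this
# module: stack the female and male frames, then derive a per-area 'total' row.
# The data and helper name are illustrative only.
def _example_totals_by_area():
    female = pd.DataFrame({'LocalArea': ['A'], 'Type': ['female'], 'Total': [10]})
    male = pd.DataFrame({'LocalArea': ['A'], 'Type': ['male'], 'Total': [12]})
    merged = pd.concat([female, male])
    total = merged.groupby('LocalArea')[['Total']].sum()
    total['Type'] = 'total'
    return pd.concat([merged, total.reset_index()])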
###############################################################################
def clean_marital_status(census_dict, year, file_path):
if year in [2001, 2006]:
col_names = ['LocalArea', 'Total population 15 years and over',
'Single (never legally married)', 'Married',
'Separated', 'Divorced', 'Widowed', 'total x',
'Not living common law', 'Living common law']
cols_ord = ['LocalArea', 'Total population 15 years and over',
'Married or living with a or common-law partner',
'Married', 'Living common law',
'Not living with a married spouse or common-law partner',
'Single (never legally married)', 'Separated',
'Divorced', 'Widowed']
df1 = census_dict['Total population 15 years and over by legal marital status']
df2 = census_dict['Total population 15 years and over by common-law status']
merged = pd.merge(df1, df2, on=['LocalArea'])
merged.set_axis(col_names, axis=1, inplace=True)
merged['Married or living with a or common-law partner'] = merged['Married'] + merged['Living common law']
merged['Not living with a married spouse or common-law partner'] = merged['Total population 15 years and over'] - merged['Married or living with a or common-law partner']
merged = merged[cols_ord]
else:
if year == 2011:
total = census_dict['Total population 15 years and over by marital status']
male = census_dict['Males 15 years and over by marital status']
female = census_dict['Females 15 years and over by marital status']
elif year == 2016:
total = census_dict['Total - Marital status for the population aged 15 years and over - 100% data']
male = census_dict['Total - Marital status for males aged 15 years and over - 100% data']
female = census_dict['Total - Marital status for females aged 15 years and over - 100% data']
col_names = ['LocalArea', 'Type',
'Total population 15 years and over',
'Married or living with a or common-law partner',
'Married', 'Living common law',
'Not living with a married spouse or common-law partner',
'Single (never legally married)', 'Separated',
'Divorced', 'Widowed']
female.insert(1, 'Type', 'female')
female.set_axis(col_names, axis=1, inplace=True)
male.insert(1, 'Type', 'male')
male.set_axis(col_names, axis=1, inplace=True)
total.insert(1, 'Type', 'total')
total.set_axis(col_names, axis=1, inplace=True)
merged = pd.concat([female, male, total])
merged.sort_values(by=['LocalArea', 'Type'], inplace=True)
census_dict['marital status'] = merged
merged.to_csv(file_path + '/marital_status.csv')
return census_dict
###############################################################################
def clean_couple_fam_structure(census_dict, year, file_path):
col_names = ['LocalArea', 'Type', 'Total', 'Without children at home',
'With children at home', '1 child', '2 children',
'3 or more children']
if year == 2016:
total = census_dict['Total - Couple census families in private households - 100% data']
total.insert(1, 'Type', 'total couples')
total.set_axis(col_names, axis=1, inplace=True)
census_dict['couples - family structure'] = total
total.to_csv(file_path + '/couples_family_structure.csv')
else:
if year in [2011, 2006]:
married = census_dict['Total couple families by family structure and number of children']
married = married[['LocalArea', 'Married couples',
'Without children at home',
'With children at home', '1 child',
'2 children', '3 or more children']]
common_law = census_dict['Common-law couples']
elif year == 2001:
married = census_dict['Total couple families by family structure']
married = married[['LocalArea', 'Married couples',
'Without children at home',
'With children at home', '1 child',
'2 children', '3 or more children']]
common_law = census_dict['Common-law couples']
married.insert(1, 'Type', 'married couples')
married.set_axis(col_names, axis=1, inplace=True)
common_law.insert(1, 'Type', 'common-law couples')
common_law.set_axis(col_names, axis=1, inplace=True)
merged = pd.concat([married, common_law])
total = merged.groupby('LocalArea').sum()
total['Type'] = 'total couples'
total.reset_index(inplace=True)
merged = pd.concat([merged, total])
merged.sort_values(by=['LocalArea', 'Type'], inplace=True)
census_dict['couples - family structure'] = merged
merged.to_csv(file_path + '/couples_family_structure.csv')
return census_dict
###############################################################################
def clean_language_detailed(census_dict, year, file_path):
if year == 2006:
mt_total = census_dict['Total population by mother tongue']
home_total = census_dict['Total population by language spoken most often at home']
home_total = home_total.iloc[:, 0:104].copy()
work_total = census_dict['Total population 15 years and over who worked since January 1, 2005 by language used most often at work']
mt_total.rename(columns={mt_total.columns[1]: 'Total'}, inplace=True)
mt_total.insert(1, 'Type', 'mother tongue - total')
home_total.rename(columns={home_total.columns[1]: 'Total'},
inplace=True)
home_total.insert(1, 'Type',
'language most often spoken at home - total')
work_total.rename(columns={work_total.columns[1]: 'Total'},
inplace=True)
work_total.insert(1, 'Type',
'language most often spoken at work - total')
merged = pd.concat([mt_total, home_total, work_total])
elif year == 2001:
mt_total = census_dict['Total population by mother tongue']
home_total = census_dict['Total population by home language']
home_total = home_total.groupby(home_total.columns, axis=1).sum()
mt_total.rename(columns={mt_total.columns[1]: 'Total'}, inplace=True)
mt_total.insert(1, 'Type', 'mother tongue - total')
home_total.rename(columns={'Total population by home language': 'Total'}, inplace=True)
home_total.insert(1, 'Type', 'language most often spoken at home - total')
merged = pd.concat([mt_total, home_total])
else:
if year == 2011:
mt_total = census_dict['Detailed mother tongue - Total population excluding institutional residents']
mt_male = census_dict['Detailed mother tongue - Males excluding institutional residents']
mt_female = census_dict['Detailed mother tongue - Females excluding institutional residents']
home_total = census_dict['Detailed language spoken most often at home - Total population excluding institutional residents']
home_male = census_dict['Detailed language spoken most often at home - Males excluding institutional residents']
home_female = census_dict['Detailed language spoken most often at home - Females excluding institutional residents']
home2_total = census_dict['Detailed other language spoken regularly at home - Total population excluding institutional residents']
home2_male = census_dict['Detailed other language spoken regularly at home - Males excluding institutional residents']
home2_female = census_dict['Detailed other language spoken regularly at home - Females excluding institutional residents']
elif year == 2016:
mt_total = census_dict['Total - Mother tongue for the total population excluding institutional residents - 100% data']
mt_male = census_dict['Total - Mother tongue for males excluding institutional residents - 100% data']
mt_female = census_dict['Total - Mother tongue for females excluding institutional residents - 100% data']
home_total = census_dict['Total - Language spoken most often at home for the total population excluding institutional residents - 100% data']
home_male = census_dict['Total - Language spoken most often at home for males excluding institutional residents - 100% data']
home_female = census_dict['Total - Language spoken most often at home for females excluding institutional residents - 100% data']
home2_total = census_dict['Total - Other language(s) spoken regularly at home for the total population excluding institutional residents - 100% data']
home2_male = census_dict['Total - Other language(s) spoken regularly at home for males excluding institutional residents - 100% data']
home2_female = census_dict['Total - Other language(s) spoken regularly at home for females excluding institutional residents - 100% data']
mt_female.rename(columns={mt_female.columns[1]: 'Total'}, inplace=True)
mt_female.insert(1, 'Type', 'mother tongue - female')
mt_male.rename(columns={mt_male.columns[1]: 'Total'}, inplace=True)
mt_male.insert(1, 'Type', 'mother tongue - male')
mt_total.rename(columns={mt_total.columns[1]: 'Total'}, inplace=True)
mt_total.insert(1, 'Type', 'mother tongue - total')
home_female.rename(columns={home_female.columns[1]: 'Total'}, inplace=True)
home_female.insert(1, 'Type', 'language most often spoken at home - female')
home_male.rename(columns={home_male.columns[1]: 'Total'}, inplace=True)
home_male.insert(1, 'Type', 'language most often spoken at home - male')
home_total.rename(columns={home_total.columns[1]: 'Total'}, inplace=True)
home_total.insert(1, 'Type', 'language most often spoken at home - total')
home2_female.rename(columns={home2_female.columns[1]: 'Total'}, inplace=True)
home2_female.insert(1, 'Type', 'other language spoken at home - female')
home2_male.rename(columns={home2_male.columns[1]: 'Total'}, inplace=True)
home2_male.insert(1, 'Type', 'other language spoken at home - male')
home2_total.rename(columns={home2_total.columns[1]: 'Total'}, inplace=True)
home2_total.insert(1, 'Type', 'other language spoken at home - total')
merged = pd.concat([mt_female, mt_male, mt_total,
home_female, home_male, home_total,
home2_female, home2_male, home2_total])
merged.sort_values(by=['LocalArea', 'Type'], inplace=True)
census_dict['detailed language'] = merged
merged.to_csv(file_path + '/detailed_language.csv')
return census_dict
###############################################################################
def clean_official_language(census_dict, year, file_path):
col_names = ['LocalArea', 'Type', 'Total', 'English', 'French',
'English and French', 'Neither English nor French']
if year == 2016:
known = census_dict['Total - Knowledge of official languages for the total population excluding institutional residents - 100% data']
first = census_dict['Total - First official language spoken for the total population excluding institutional residents - 100% data']
elif year == 2011:
known = census_dict['Knowledge of official languages - Total population excluding institutional residents']
first = census_dict['First official language spoken - Total population excluding institutional residents']
elif year in [2001, 2006]:
known = census_dict['Total population by knowledge of official languages']
first = census_dict['Total population by first official language spoken']
known.insert(1, 'Type', 'knowledge of official languages')
known.set_axis(col_names, axis=1, inplace=True)
first.insert(1, 'Type', 'first official language spoken')
first.set_axis(col_names, axis=1, inplace=True)
merged = pd.concat([known, first])
merged.sort_values(by=['LocalArea', 'Type'], inplace=True)
census_dict['official language'] = merged
merged.to_csv(file_path + '/official_language.csv')
return census_dict
###############################################################################
def clean_structural_dwelling_type(census_dict, year, file_path):
if year == 2006:
col_names = ['LocalArea', 'Total', 'Single-detached house',
'Semi-detached house', 'Row house', 'Apartment, duplex',
'Apartment, building that has five or more storeys']
df = census_dict['Total number of occupied private dwellings by structural type of dwelling']
elif year in [2001, 2011, 2016]:
col_names = ['LocalArea', 'Total', 'Single-detached house',
'Semi-detached house', 'Row house',
'Apartment, detached duplex',
'Apartment, building that has five or more storeys',
'Apartment, building that has fewer than five storeys',
'Other single-attached house', 'Movable dwelling']
if year == 2001:
df = census_dict['Total number of occupied private dwellings by structural type of dwelling']
df = df.iloc[:, 0:10].copy()
elif year == 2011:
df = census_dict['Total number of occupied private dwellings by structural type of dwelling']
df = df[['LocalArea',
'Total number of occupied private dwellings by structural type of dwelling',
'Single-detached house', 'Semi-detached house',
'Row house', 'Apartment, duplex',
'Apartment, building that has five or more storeys',
'Apartment, building that has fewer than five storeys',
'Other single-attached house', 'Movable dwelling']].copy()
elif year == 2016:
df = census_dict['Total - Occupied private dwellings by structural type of dwelling - 100% data']
df = df[['LocalArea',
'Total - Occupied private dwellings by structural type of dwelling - 100% data',
'Single-detached house', 'Semi-detached house',
'Row house', 'Apartment or flat in a duplex',
'Apartment in a building that has five or more storeys',
'Apartment in a building that has fewer than five storeys',
'Other single-attached house', 'Movable dwelling']].copy()
df.set_axis(col_names, axis=1, inplace=True)
df.sort_values(by=['LocalArea'], inplace=True)
census_dict['structural dwelling type'] = df
df.to_csv(file_path + '/structural_dwelling_type.csv')
return census_dict
###############################################################################
def clean_household_size(census_dict, year, file_path):
if year == 2001:
col_names = ['LocalArea', 'Total households', '1 person', '2 persons',
'3 persons', '4 to 5 persons', '6 or more persons',
'Average household size']
df = census_dict['Total number of private households by household size']
elif year in [2006, 2011]:
col_names = ['LocalArea', 'Total households', '1 person', '2 persons',
'3 persons', '4 to 5 persons', '6 or more persons',
'Number of persons in private households',
'Average household size']
df = census_dict['Total number of private households by household size']
elif year == 2016:
col_names = ['LocalArea', 'Total households', '1 person', '2 persons',
'3 persons', '4 persons', '5 or more persons',
'Number of persons in private households',
'Average household size']
df = census_dict['Total - Private households by household size - 100% data']
df.set_axis(col_names, axis=1, inplace=True)
df.sort_values(by=['LocalArea'], inplace=True)
census_dict['household size'] = df
df.to_csv(file_path + '/household_size.csv')
return census_dict
###############################################################################
def clean_lone_parent(census_dict, year, file_path):
col_names = ['LocalArea', 'Total lone-parent families', 'Female parent',
'Male parent', '1 child', '2 children', '3 or more children']
if year == 2016:
df1 = census_dict["Total lone-parent families by sex of parent"]
df2 = census_dict["Total - Lone-parent census families in private households - 100% data"]
df = pd.concat([df1, df2], axis=1)
df = df.groupby(df.columns, axis=1).first()
df = df[['LocalArea', 'Total lone-parent families by sex of parent',
'Female parent', 'Male parent', '1 child', '2 children',
'3 or more children']].copy()
elif year == 2011:
df = census_dict['Total lone-parent families by sex of parent and number of children']
df = df.groupby(df.columns, axis=1).sum()
df = df[['LocalArea',
'Total lone-parent families by sex of parent and number of children',
'Female parent', 'Male parent', '1 child', '2 children',
'3 or more children']].copy()
elif year == 2006:
df1 = census_dict['Total lone-parent families by sex of parent and number of children']
df2 = census_dict['Female parent']
df2 = df2.iloc[:, 1:5].copy()
df3 = census_dict['Male parent']
df3 = df3.iloc[:, 1:5].copy()
df = | pd.concat([df1, df2, df3], axis=1) | pandas.concat |
# -*- encoding: utf-8 -*-
import pandas as pd
import matplotlib.pyplot as plt
from configue import TRAIN_PERIOD
from configue import T
data = | pd.read_csv('./data/original_data.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
from html.parser import HTMLParser
import html
from sklearn import preprocessing
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.utils import shuffle
## TODO cosine without new users
# just desc
# dec and categ (handle categories differently ?)
# dec and categ with NLP
# trends for new users
# new user cutoff search
# trends also for old users
# count add to carts ?
dfUsersPurchased = None
dfUsersAddedCart = None
userActionsCounts = None
dfProducts = None
dfUserActions = None
test = None
predictionsDF = None
train = None
topProducts = None
TRAIN_SPLIT = 0.6
class MLStripper(HTMLParser):
def __init__(self):
super().__init__()
self.reset()
self.strict = False
self.convert_charrefs= True
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(htmlString):
#there is some missing and/or corrupt data so handle it properly
try:
#unescape HTML entities before stripping tags
htmlString = html.unescape(htmlString)
s = MLStripper()
s.feed(htmlString)
except:
return ""
return s.get_data()
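# Hedged usage sketch for the HTML stripper above; the helper name is illustrative.
def _example_strip_tags():
    return strip_tags("<p>Red <b>dress</b> &amp; heels</p>")  # -> "Red dress & heels"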
def preProcess():
global dfUsersPurchased
global dfUsersAddedCart
global dfUsersViewed
global userActionsCounts
global dfProducts
global dfUserActions
dfUserActions = | pd.read_csv("C:\\Users\\igorf\\PycharmProjects\\vinf2\\data\\vi_dataset_events.csv") | pandas.read_csv |
import csv
import glob
import re
from pathlib import Path
import numpy as np
import pandas as pd
from model import LSTM, ForecastDataset
from plumbum import cli
from preprocessor import Preprocessor
class ClusterForecaster:
"""
Predict cluster in workload using trained LSTMs.
Attributes
----------
prediction_interval : pd.Timedelta
Time interval to aggregate cluster counts by.
prediction_horizon : pd.Timedelta
The prediction horizon of the models to train.
prediction_seqlen : int
Number of intervals to feed the LSTM for a prediction.
models : Dict[int, LSTM]
Dictionary of trained models to perform inference by
"""
MODEL_PREFIX = "model_"
@staticmethod
def cluster_to_file(path, cluster):
"""Generate model file path from cluster name"""
return f"{path}/{ClusterForecaster.MODEL_PREFIX}{cluster}.pkl"
@staticmethod
def get_cluster_from_file(filename):
"""Infer cluster id from file name"""
m = re.search(f"(?<={ClusterForecaster.MODEL_PREFIX})[^/]*(?=\\.pkl)", filename)
if m is None:
raise RuntimeError("Could not get cluster name")
return m[0]
def __init__(
self,
train_df,
prediction_seqlen,
prediction_interval,
prediction_horizon,
save_path,
top_k=5,
override=False,
):
"""Construct the ClusterForecaster object.
Parameters
----------
train_df : pd.DataFrame
Training data grouped by cluster and timestamp
save_path : str
Directory for loading/saving trained models
top_k : int
Only train models for the top k most common clusters.
override : bool
Determines whether we should (re)train models anyway, even if they are
in the directory.
"""
assert train_df.index.names[0] == "cluster"
assert train_df.index.names[1] == "log_time_s"
self.prediction_seqlen = prediction_seqlen
self.prediction_interval = prediction_interval
self.prediction_horizon = prediction_horizon
self.models = {}
if not override:
model_files = glob.glob(str(Path(save_path) / f"{self.MODEL_PREFIX}*.pkl"))
for filename in model_files:
cluster_name = self.get_cluster_from_file(filename)
self.models[int(cluster_name)] = LSTM.load(filename)
print(f"loaded model for cluster {cluster_name}")
print(f"Loaded {len(model_files)} models")
if train_df is None:
return
# Only consider top k clusters.
cluster_totals = train_df.groupby(level=0).sum().sort_values(by="count", ascending=False)
labels = cluster_totals.index[:top_k]
print("Training on cluster time series..")
mintime = train_df.index.get_level_values(1).min()
maxtime = train_df.index.get_level_values(1).max()
dtindex = pd.DatetimeIndex([mintime, maxtime])
for cluster in labels:
if cluster in self.models and not override:
print(f"Already have model for cluster {cluster}, skipping")
continue
print(f"training model for cluster {cluster}")
cluster_counts = train_df[train_df.index.get_level_values(0) == cluster].droplevel(0)
# This zero-fills the start and ends of the cluster time series.
cluster_counts = cluster_counts.reindex(cluster_counts.index.append(dtindex), fill_value=0)
cluster_counts = cluster_counts.resample(prediction_interval).sum()
self._train_cluster(cluster_counts, cluster, save_path)
def _train_cluster(self, cluster_counts, cluster, save_path):
dataset = ForecastDataset(
cluster_counts,
sequence_length=self.prediction_seqlen,
horizon=self.prediction_horizon,
interval=self.prediction_interval,
)
self.models[cluster] = LSTM(
horizon=self.prediction_horizon,
interval=self.prediction_interval,
sequence_length=self.prediction_seqlen,
)
self.models[cluster].fit(dataset)
self.models[cluster].save(self.cluster_to_file(save_path, cluster))
def predict(self, cluster_df, cluster, start_time, end_time):
"""
Given a cluster dataset, attempt to return prediction of query count
from a cluster within the given time-range.
"""
assert cluster_df.index.names[0] == "cluster"
assert cluster_df.index.names[1] == "log_time_s"
if cluster not in cluster_df.index.get_level_values(0):
print(f"Could not find cluster {cluster} in cluster_df")
return None
cluster_counts = cluster_df[cluster_df.index.get_level_values(0) == cluster].droplevel(0)
# Truncate cluster_df to the time range necessary to generate prediction range.
# TODO(Mike): Right now, if the sequence required to predict a certain interval
# is not present in the data, we simply do not make any predictions (i.e. return 0)
# Should we produce a warning/error so the user is aware there is insufficient
# data?
trunc_start = start_time - self.prediction_horizon - (self.prediction_seqlen) * self.prediction_interval
trunc_end = end_time - self.prediction_horizon
truncated = cluster_counts[(cluster_counts.index >= trunc_start) & (cluster_counts.index < trunc_end)]
dataset = ForecastDataset(
truncated,
sequence_length=self.prediction_seqlen,
horizon=self.prediction_horizon,
interval=self.prediction_interval,
)
# generate predictions
predictions = [self.models[cluster].predict(seq) for seq, _ in dataset]
# tag with timestamps
pred_arr = [[dataset.get_y_timestamp(i), pred] for i, pred in enumerate(predictions)]
pred_df = | pd.DataFrame(pred_arr, columns=["log_time_s", "count"]) | pandas.DataFrame |
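# Standalone, hedged illustration of the truncation arithmetic used in
# ClusterForecaster.predict: how far back the counts must reach so the first input
# sequence can produce a prediction at `start_time`. Names and defaults are assumed.
def _example_truncation_window(start_time, end_time,
                               horizon=pd.Timedelta(hours=1),
                               interval=pd.Timedelta(minutes=1),
                               seqlen=10):
    trunc_start = start_time - horizon - seqlen * interval
    trunc_end = end_time - horizon
    return trunc_start, trunc_end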
import itertools
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
notna,
)
# create the data only once as we are not setting it
def _create_consistency_data():
def create_series():
return [
Series(dtype=np.float64, name="a"),
Series([np.nan] * 5),
Series([1.0] * 5),
Series(range(5, 0, -1)),
Series(range(5)),
Series([np.nan, 1.0, np.nan, 1.0, 1.0]),
Series([np.nan, 1.0, np.nan, 2.0, 3.0]),
Series([np.nan, 1.0, np.nan, 3.0, 2.0]),
]
def create_dataframes():
return [
DataFrame(columns=["a", "a"]),
DataFrame(np.arange(15).reshape((5, 3)), columns=["a", "a", 99]),
] + [ | DataFrame(s) | pandas.DataFrame |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
# @author : <NAME>
# @Email : <EMAIL>
# @Project : Python_Files
# @File : utils.py
# @Software: PyCharm
# @Time    : 2021/5/20 7:42 PM
"""
import os
import struct
import sys
import time
import traceback
from datetime import datetime
from pathlib import Path
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
pd.set_option("display.max_columns", None)
# likewise, set the maximum number of rows to display
pd.set_option("display.max_rows", None)
# function: byte2int
def byte2int(data, mode="u16"):
dbyte = bytearray(data)
darray = []
i = 0
while i < len(dbyte):
if "u8" == mode:
darray.append(dbyte[i])
i = i + 1
elif "u16" == mode:
darray.append(dbyte[i] | dbyte[i + 1] << 8)
i = i + 2
return darray
# end: byte2int
# function: byte2float
def byte2float(data, mode="float"):
darray = []
i = 0
if "float" == mode:
while i < len(data):
fx = struct.unpack("f", data[i : i + 4])
darray.append(fx)
i = i + 4
elif "double" == mode:
while i < len(data):
dx = struct.unpack("d", data[i : i + 8])
darray.append(dx)
i = i + 8
return darray
# end: byte2float
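# Editor's note: a small usage sketch (added by the editor, not part of the original file)
# showing how byte2int/byte2float decode little-endian buffers. byte2float appends the raw
# 1-element tuples returned by struct.unpack, which is why the expected values are tuples.
def _demo_byte_decoding():
    raw_u16 = bytes([0x01, 0x00, 0xFF, 0x00])  # 1 and 255 as little-endian uint16
    assert byte2int(raw_u16, mode="u16") == [1, 255]
    raw_f = struct.pack("ff", 1.5, -2.0)  # two 32-bit floats back to back
    assert byte2float(raw_f, mode="float") == [(1.5,), (-2.0,)]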
def read_bytefile(path, folder, file, mode="u8"):
fname = path + folder + file
f = open(fname, "rb")
dtmp = f.read()
    rslt = None  # result container; a local default avoids relying on a module-level global
if "u8" == mode:
rslt = byte2int(dtmp, mode="u8")
if "u16" == mode:
rslt = byte2int(dtmp, mode="u16")
if "float" == mode:
rslt = byte2float(dtmp, mode="float")
if "double" == mode:
rslt = byte2float(dtmp, mode="double")
return rslt
# write one row of data into the sheet
def insertOne(value, sheet):
sheet.append(value)
def read_raw(src_dir, fname):
bcg, gain = [], []
fname = src_dir + fname
f = open(fname, "rb")
dtmp = f.read()
dbyte = bytearray(dtmp)
i = 0
while i < len(dbyte):
bcg.append(dbyte[i] | dbyte[i + 1] << 8)
gain.append(dbyte[i + 2])
i = i + 3
return bcg, gain
def read_wgt(src_dir, fname):
wgt = []
fname = src_dir + fname
f = open(fname, "rb")
dtmp = f.read()
dbyte = bytearray(dtmp)
i = 0
while i < len(dbyte):
wgt.append(dbyte[i + 1] | dbyte[i] << 8)
i = i + 2
return wgt
def time2stamp(cmnttime):  # convert a time string to a timestamp
    # parse into a time struct
    timeArray = time.strptime(cmnttime, "%Y-%m-%d %H:%M:%S")
    # convert to a Unix timestamp
timeStamp = int(time.mktime(timeArray))
return timeStamp
def stamp2time(timeStamp):
timeArray = time.localtime(timeStamp)
otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
return otherStyleTime
def day2stamp(cmnttime):  # convert a date string to a timestamp
    # parse into a time struct
    timeArray = time.strptime(cmnttime, "%Y-%m-%d")
    # convert to a Unix timestamp
timeStamp = int(time.mktime(timeArray))
return timeStamp
def stamp2day(timeStamp):
timeArray = time.localtime(timeStamp)
otherStyleTime = time.strftime("%Y-%m-%d", timeArray)
return otherStyleTime
def hour2stamp(cmnttime):  # convert a minute-resolution time string to a timestamp
    # parse into a time struct
    timeArray = time.strptime(cmnttime, "%Y-%m-%d %H:%M")
    # convert to a Unix timestamp
timeStamp = int(time.mktime(timeArray))
return timeStamp
def stamp2hour(timeStamp):
timeArray = time.localtime(timeStamp)
otherStyleTime = time.strftime("%Y-%m-%d %H:%M", timeArray)
return otherStyleTime
def time2datetime(tranTime, pList):
tdelta, startstamp = 60, int(time2stamp(tranTime))
t = [datetime.fromtimestamp(startstamp + t * tdelta) for t in range(len(pList))]
return t
def time_formattime(pList):
famTime = [datetime.fromisoformat(t) for t in pList]
return famTime
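# Editor's note: an editor-added round-trip sketch for the timestamp helpers above; the
# example time string is arbitrary and the conversions use the local timezone.
def _demo_time_roundtrip():
    ts = time2stamp("2021-05-20 19:42:00")
    assert stamp2time(ts) == "2021-05-20 19:42:00"
    assert stamp2day(ts) == "2021-05-20"
    assert stamp2hour(ts) == "2021-05-20 19:42"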
def quest_time_extract(num_spl, quest_outbed, slp_awTim):
num_slp0 = num_spl[0]
num_slp2 = num_spl[:2]
aslp_day = stamp2day(day2stamp(slp_awTim) - 86400)
awak_day = slp_awTim
if len(num_spl) == 6:
outbed_stamp = "0" + num_spl[0] + ":" + num_spl[1:3] + ":00"
if int(num_slp0) >= 19 and int(num_slp0) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_slp0) >= 0 and int(num_slp0) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif len(num_spl) == 4:
outbed_stamp = num_spl[:2] + ":" + num_spl[2:] + ":00"
if int(num_slp2) >= 19 and int(num_slp2) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_slp2) >= 0 and int(num_slp2) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif len(num_spl) == 3:
outbed_stamp = "0" + num_spl[0] + ":" + num_spl[1:] + ":00"
if int(num_slp0) >= 19 and int(num_slp0) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_slp0) >= 0 and int(num_slp0) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif len(num_spl) == 2:
outbed_stamp = "0" + num_spl[0] + ":" + "00" + ":00"
if int(num_slp0) >= 19 and int(num_slp0) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_slp0) >= 0 and int(num_slp0) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif len(num_spl) == 1:
outbed_stamp = "0" + num_spl + ":" + "00" + ":00"
if int(num_spl) >= 19 and int(num_spl) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_spl) >= 0 and int(num_spl) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
def diff_acl(slpList, psgList):
fslp_diff = int(abs(time2stamp(str(psgList)) - time2stamp(str(slpList))) / 60)
return fslp_diff
def num_pop(num1: list, num2: list):
if len(num1) > len(num2):
lenDiff = len(num1) - len(num2)
for i in range(lenDiff):
num1.pop()
elif len(num2) > len(num1):
lenDiff = len(num2) - len(num1)
for i in range(lenDiff):
num2.pop()
def num3_pop(num1: list, num2: list, num3: list):
num2 = [str(i) for i in range(len(num2))]
num3 = [str(i) for i in range(len(num3))]
maxLen = max(len(num1), len(num2), len(num3))
minLen = min(len(num1), len(num2), len(num3))
plen = maxLen - minLen
new_num1, new_num2, new_num3 = 0, 0, 0
for i in range(maxLen):
if len(num1) == maxLen:
new_num1 = num1[:-plen]
elif len(num2) == maxLen:
new_num2 = num2[:-plen]
elif len(num3) == maxLen:
new_num3 = num3[:-plen]
return new_num1, new_num2, new_num3
def len_compare(pr_list: list, rr_list: list):
if len(pr_list) > len(rr_list):
return len(rr_list)
elif len(pr_list) < len(rr_list):
return len(pr_list)
def path_concat(sub_dir, pathName):
_path = str(sub_dir.joinpath(pathName)) + "/"
return _path
def is_empty_file_3(file_path: str):
    assert isinstance(file_path, str), f"file_path is not a str: {type(file_path)}"
    p = Path(file_path)
    assert p.is_file(), f"file_path is not a file: {file_path}"
return p.stat().st_size == 0
def dir_empty(dir_path):
try:
next(os.scandir(dir_path))
return False
except StopIteration:
return True
def select_num(df1, df2):
# num_requried = 0
hr_lower_limit = df1["hr"].map(lambda x: x != 0)
hr_upper_limit = df1["hr"].map(lambda x: x != 255)
br_lower_limit = df1["br"].map(lambda x: x != 0)
br_upper_limit = df1["br"].map(lambda x: x != 255)
pr_lower_limit = df2["pr"].map(lambda x: x != 0)
pr_upper_limit = df2["pr"].map(lambda x: x != 255)
rr_lower_limit = df2["rr"].map(lambda x: x != 0)
rr_upper_limit = df2["rr"].map(lambda x: x != 255)
df1 = df1[
(hr_lower_limit & hr_upper_limit & br_lower_limit & br_upper_limit)
& (pr_lower_limit & pr_upper_limit & rr_lower_limit & rr_upper_limit)
]
df2 = df2[
(hr_lower_limit & hr_upper_limit & br_lower_limit & br_upper_limit)
& (pr_lower_limit & pr_upper_limit & rr_lower_limit & rr_upper_limit)
]
    df1 = df1.reset_index(drop=True)  # re-index from 0
    df2 = df2.reset_index(drop=True)  # re-index from 0
return df1, df2
def minute_mean(df, cname, stime):
    # compute the per-minute mean of the SLP heart rate / respiration rate
    hr_min_list = []
    slp_time_min_list = []
    df_min = int(len(df[cname]) / 60)  # total number of whole minutes of data
for i in range(df_min):
hr_min_len = (i + 1) * 60
num = 0
temp = 0
slp_time_min = stime + hr_min_len
for j in df[cname][hr_min_len - 60 : hr_min_len]:
if j != 0 and j != 255:
num += 1
temp += j
if num > 0:
res = int(temp / num)
hr_min_list.append(res)
if num == 0:
hr_min_list.append(0)
slp_time_min_list.append(slp_time_min)
# rslt = {'time':slp_time_min_list,'hr':hr_min_list,'br':br_min_list}
# df_clean = pd.DataFrame(data=rslt)
return slp_time_min_list, hr_min_list
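# Editor's note: an editor-added sketch of minute_mean on two minutes of synthetic
# second-level data; values 0 and 255 are treated as invalid and excluded from the mean.
def _demo_minute_mean():
    demo = pd.DataFrame({"hr": [70] * 60 + [80] * 30 + [255] * 30})
    times, means = minute_mean(demo, "hr", stime=1_600_000_000)
    assert means == [70, 80]  # the second minute ignores the 255 placeholders
    assert times == [1_600_000_060, 1_600_000_120]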
def file_exist(my_file):
txt_list = []
if Path(my_file).is_file() is False:
Path(my_file).touch()
return txt_list
def Heart_rate_accuracy_calculat(PR, HR, src_txt, fcsv):
PR = PR[PR.map(lambda x: x > 0)]
HR = HR[HR.map(lambda x: x > 0)]
    PR = PR.reset_index(drop=True)  # re-index from 0
    HR = HR.reset_index(drop=True)  # re-index from 0
diff_hr = PR - HR
diff_hr_cnt = 0
try:
diff_hr_pre = abs(diff_hr) / PR
diff_hr_pre = diff_hr_pre.dropna()
diff_hr_pre = diff_hr_pre * 100
for i, val in enumerate(diff_hr):
            if i < len(PR):  # strict bound avoids an IndexError on the last element
                if abs(val) <= PR[i] * 0.1 or abs(val) <= 5:
diff_hr_cnt += 1
hr_mean = round(np.mean(abs(diff_hr)), 2)
hr_std = round(np.std(abs(diff_hr), ddof=1), 2)
if len(diff_hr_pre) == 0:
print(traceback.print_exc())
else:
acc_hr = diff_hr_cnt / len(diff_hr_pre)
            txt_content = (
                fcsv
                + " heart-rate accuracy [%d / %d]: %.2f %%"
                % (
                    diff_hr_cnt,
                    len(diff_hr_pre),
                    round(acc_hr * 100, 2),
                )
                + " heart-rate error:",
str(hr_mean) + "±" + str(hr_std),
)
f = open(src_txt + "accuracy.txt", "a")
f.write((str(txt_content) + "\r"))
return acc_hr
except Exception as exc:
print(exc)
print(traceback.print_exc())
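# Editor's note: an editor-added worked example of the acceptance rule used above: a
# minute counts as accurate when |PR - HR| <= 10% of PR or <= 5 bpm. With PR = 60 and
# HR = 66 the difference is 6, which fails the 5 bpm test but passes the 10% test (6 <= 6.0).
def _demo_hr_accuracy_rule(pr=60, hr=66):
    diff = abs(pr - hr)
    return diff <= pr * 0.1 or diff <= 5  # -> True for the default arguments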
def Respiration_rate_accuracy_calculat(RR, br, src_txt, fcsv):
RR = RR[RR.map(lambda x: x > 0)]
br = br[br.map(lambda x: x > 0)]
    RR = RR.reset_index(drop=True)  # re-index from 0
    br = br.reset_index(drop=True)  # re-index from 0
    try:
        # compute respiration-rate accuracy
diff_br_pre = abs(RR - br)
diff_br_pre = diff_br_pre.dropna()
diff_br_cnt = 0
for i in diff_br_pre:
if i <= 2:
diff_br_cnt += 1
br_mean = round(np.mean(abs(diff_br_pre)), 2)
br_std = round(np.std(abs(diff_br_pre), ddof=1), 2)
if len(diff_br_pre) == 0:
print(traceback.print_exc())
else:
acc_br = diff_br_cnt / len(diff_br_pre)
            txt_content = (
                fcsv
                + " respiration-rate accuracy [%d / %d]: %.2f %%"
                % (
                    diff_br_cnt,
                    len(diff_br_pre),
                    round(acc_br * 100, 2),
                )
                + " respiration-rate error:",
str(br_mean) + "±" + str(br_std),
)
f = open(src_txt + "accuracy.txt", "a")
f.write((str(txt_content) + "\r"))
return acc_br
except Exception as exc:
print(exc)
print(traceback.print_exc())
def draw_PR_save(PR, slp_hr, time_offset, img_dir, fcsv, acc_flag):
    # plotting
    mpl.rcParams["font.sans-serif"] = ["SimHei"]
    mpl.rcParams["axes.unicode_minus"] = False
    # configure the x-axis date display: format and tick interval
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%Y%m/%d %H:%M:%S"))
    plt.gca().xaxis.set_major_locator(mdates.MinuteLocator(interval=15))
    if len(PR) > len(time_offset):
        PR = PR[:-1]
    ax1 = plt.subplot(412)
    plt.plot(time_offset, PR, "r-", label="PSG")
    plt.plot(time_offset, slp_hr, "b-", label="smart pillow")
    plt.title("Heart rate comparison (bpm)", fontsize=9)
plt.legend(loc="upper right")
plt.setp(ax1.get_xticklabels(), visible=False, fontsize=9)
# plt.xlim(time_offset[0], time_offset[-1])
plt.ylim(40, 100)
    f = plt.gcf()  # get the current figure
    if acc_flag == 1:
        f.savefig(img_dir + "err_img/" + fcsv + ".png", bbox_inches="tight")
    elif acc_flag == 0:
        f.savefig(img_dir + "nor_img/" + fcsv + ".png", bbox_inches="tight")
    f.clear()  # release memory
def draw_PR_RR_save(PR, RR, slp_hr, slp_br, time_offset, img_dir, fcsv, acc_flag):
    # plotting
    mpl.rcParams["font.sans-serif"] = ["SimHei"]
    mpl.rcParams["axes.unicode_minus"] = False
    # fig.suptitle(fname)
    # configure the x-axis date display: format and tick interval
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%Y%m/%d %H:%M:%S"))
plt.gca().xaxis.set_major_locator(mdates.MinuteLocator(interval=15))
if len(PR) > len(time_offset):
PR = PR[:-1]
if len(RR) > len(time_offset):
RR = RR[:-1]
print(len(time_offset), len(PR))
print(time_offset)
ax1 = plt.subplot(412)
plt.plot(time_offset, PR, "r-", label="PSG")
    plt.plot(time_offset, slp_hr, "b-", label="smart pillow")
    plt.title("Heart rate comparison (bpm)", fontsize=9)
plt.legend(loc="upper right")
plt.setp(ax1.get_xticklabels(), visible=False, fontsize=9)
# plt.xlim(time_offset[0], time_offset[-1])
plt.ylim(40, 100)
ax2 = plt.subplot(413, sharex=ax1)
plt.plot(time_offset, RR, "r-", label="PSG")
    plt.plot(time_offset, slp_br, "b-", label="smart pillow")
    plt.title("Respiration rate comparison (rpm)", fontsize=9)
plt.legend(loc="upper right")
plt.setp(ax2.get_xticklabels(), visible=True, fontsize=9)
plt.xticks()
# plt.xlim(time_offset[0], time_offset[-1])
plt.ylim(5, 35)
    f = plt.gcf()  # get the current figure
    if acc_flag == 1:
        f.savefig(img_dir + "err_img/" + fcsv + ".png", bbox_inches="tight")
    elif acc_flag == 0:
        f.savefig(img_dir + "nor_img/" + fcsv + ".png", bbox_inches="tight")
    # f.figlegend()
    f.clear()  # release memory
def slp_hr_br_transfrom(cat_dir, save_dir, flag):
    # convert SLP batch simulation output into csv files
flist = os.listdir(cat_dir + "hr_sec/")
for fcsv in flist[:]:
fname = fcsv.split(".")[0]
hr_list = read_bytefile(cat_dir, "hr_sec/", fcsv, mode="u8")
br_list = read_bytefile(cat_dir, "br_sec/", fcsv, mode="u8")
startstamp = int(fcsv.split("_")[-1].split(".")[0])
time_list = [startstamp + t for t in range(len(hr_list))]
if flag == 0:
rslt = {"time": time_list, "heart_rate": hr_list}
df = pd.DataFrame(data=rslt)
df.to_csv(
(save_dir + fname + ".csv"), index=False, header=["time", "heart_rate"]
)
elif flag == 1:
rslt = {"time": time_list, "breath_rate": br_list}
df = pd.DataFrame(data=rslt)
df.to_csv(
(save_dir + fname + ".csv"), index=False, header=["time", "breath_rate"]
)
elif flag == 2:
rslt = {"time": time_list, "heart_rate": hr_list, "breath_rate": br_list}
df = pd.DataFrame(data=rslt)
df.to_csv(
(save_dir + fname + ".csv"),
index=False,
header=["time", "heart_rate", "breath_rate"],
)
def psg_slp_heart_cal(src_slp, src_psg, src_txt, src_img):
    """Heart-rate accuracy calculation script."""
slp_flist = os.listdir(src_slp)
psg_flist = os.listdir(src_psg)
txt_list = []
my_file = src_txt + "setime.txt"
for i, fcsv in enumerate(slp_flist):
simg_name = fcsv.split(".")[0]
data_slp = pd.read_csv(src_slp + fcsv)
print(fcsv, psg_flist[i])
data_psg = pd.read_csv(src_psg + psg_flist[i])
data_slp.columns = ["time", "hr"]
data_psg.columns = ["time", "pr"]
time_set = [
data_slp["time"].tolist()[0],
time2stamp(data_psg["time"].tolist()[0]),
data_slp["time"].tolist()[-1],
time2stamp(data_psg["time"].tolist()[-1]),
]
start_time = time_set[0] - time_set[1]
end_time = time_set[2] - time_set[3]
if start_time < 0:
file_start = time_set[1]
else:
file_start = time_set[0]
if end_time < 0:
file_end = time_set[2]
else:
file_end = time_set[3]
        data_psg["timestamp"] = data_psg["time"].apply(time2stamp)
        print(
            "interval start:", file_start, "interval end:", file_end, "common interval length:", (file_end - file_start)
        )
slp_sind = data_slp[data_slp["time"] == file_start].index.tolist()[0]
slp_eind = data_slp[data_slp["time"] == file_end].index.tolist()[0]
slp_clist = data_slp[slp_sind : slp_eind + 1]
psg_sind = data_psg[data_psg["timestamp"] == file_start].index.tolist()[0]
psg_eind = data_psg[data_psg["timestamp"] == file_end].index.tolist()[0]
psg_clist = data_psg[psg_sind : psg_eind + 1]
hr_time, hr_list = minute_mean(slp_clist, "hr", file_start)
pr_time, pr_list = minute_mean(psg_clist, "pr", file_start)
rslt_slp = {"time": hr_time, "hr": hr_list}
clean_slp = pd.DataFrame(data=rslt_slp)
rslt_psg = {"time": pr_time, "pr": pr_list}
clean_psg = pd.DataFrame(data=rslt_psg)
time = clean_slp["time"]
HR = clean_slp["hr"]
PR = clean_psg["pr"]
acc_hr = Heart_rate_accuracy_calculat(PR, HR, src_txt, fcsv)
time_offset = [datetime.fromtimestamp(i) for i in time]
        # prepare the raw SLP heart-rate and respiration data
slp_hr = pd.Series(list(HR), index=time_offset)
if len(time_offset) > 0:
acc_flag = 0
            if acc_hr is not None and acc_hr < 0.9:
acc_flag = 1
draw_PR_save(PR, slp_hr, time_offset, src_img, simg_name, acc_flag)
else:
draw_PR_save(PR, slp_hr, time_offset, src_img, simg_name, acc_flag)
if Path(my_file).is_file() is False:
Path(my_file).touch()
if Path(my_file).exists():
size = os.path.getsize(my_file)
if size > 100:
os.remove(my_file)
Path(my_file).touch()
elif size == 0:
time_diff = file_end - file_start
                txt_content = (
                    fcsv
                    + " start time:"
                    + str(file_start)
                    + " end time:"
                    + str(file_end)
                    + " duration:"
+ str(time_diff)
)
txt_list.append(txt_content)
for i, val in enumerate(txt_list):
f = open(my_file, "a")
f.write((str(val) + "\r"))
f.close()
def psg_slp_heart_breath_cal(src_slp, src_psg, src_txt, src_img, flag):
    """Heart-rate and respiration-rate accuracy calculation script."""
if flag == 0:
slp_flist = os.listdir(src_slp)
psg_flist = os.listdir(src_psg)
slp_idList = [i.split(".")[0].split("_")[0] for i in slp_flist]
psg_idList = [i.split(".")[0].split("_")[0] for i in psg_flist]
txt_list = []
my_file = src_txt + "setime.txt"
for i, fcsv in enumerate(slp_flist):
# print(slp_idList[i],psg_idList[i])
j = psg_idList.index(slp_idList[i])
simg_name = fcsv.split(".")[0]
data_slp = pd.read_csv(src_slp + fcsv)
data_psg = pd.read_csv(src_psg + psg_flist[j])
data_slp.columns = ["time", "hr", "br"]
data_psg.columns = ["time", "pr", "rr"]
time_set = [
data_slp["time"].tolist()[0],
time2stamp(data_psg["time"].tolist()[0]),
data_slp["time"].tolist()[-1],
time2stamp(data_psg["time"].tolist()[-1]),
]
start_time = time_set[0] - time_set[1]
end_time = time_set[2] - time_set[3]
if start_time < 0:
file_start = time_set[1]
else:
file_start = time_set[0]
if end_time < 0:
file_end = time_set[2]
else:
file_end = time_set[3]
            data_psg["timestamp"] = data_psg["time"].apply(time2stamp)
            print(
                "interval start:",
                file_start,
                "interval end:",
                file_end,
                "common interval length:",
                (file_end - file_start),
            )
slp_sind = data_slp[data_slp["time"] == file_start].index.tolist()[0]
slp_eind = data_slp[data_slp["time"] == file_end].index.tolist()[0]
slp_clist = data_slp[slp_sind : slp_eind + 1]
psg_sind = data_psg[data_psg["timestamp"] == file_start].index.tolist()[0]
psg_eind = data_psg[data_psg["timestamp"] == file_end].index.tolist()[0]
psg_clist = data_psg[psg_sind : psg_eind + 1]
hr_time, hr_list = minute_mean(slp_clist, "hr", file_start)
br_time, br_list = minute_mean(slp_clist, "br", file_start)
pr_time, pr_list = minute_mean(psg_clist, "pr", file_start)
rr_time, rr_list = minute_mean(psg_clist, "rr", file_start)
rslt_slp = {"time": hr_time, "hr": hr_list, "br": br_list}
clean_slp = pd.DataFrame(data=rslt_slp)
rslt_psg = {"time": pr_time, "pr": pr_list, "rr": rr_list}
clean_psg = pd.DataFrame(data=rslt_psg)
time = clean_slp["time"]
HR = clean_slp["hr"]
PR = clean_psg["pr"]
BR = clean_slp["br"]
RR = clean_psg["rr"]
acc_hr = Heart_rate_accuracy_calculat(PR, HR, src_txt, fcsv)
acc_br = Respiration_rate_accuracy_calculat(RR, BR, src_txt, fcsv)
time_offset = [datetime.fromtimestamp(i) for i in time]
            # prepare the raw SLP heart-rate and respiration data
slp_hr = pd.Series(list(HR), index=time_offset)
slp_br = pd.Series(list(BR), index=time_offset)
if len(time_offset) > 0:
acc_flag = 0
if acc_hr is not None and acc_br is not None:
if acc_hr < 0.9 or acc_br < 0.9:
acc_flag = 1
draw_PR_RR_save(
PR,
RR,
slp_hr,
slp_br,
time_offset,
src_img,
simg_name,
acc_flag,
)
else:
draw_PR_RR_save(
PR,
RR,
slp_hr,
slp_br,
time_offset,
src_img,
simg_name,
acc_flag,
)
if Path(my_file).is_file() is False:
Path(my_file).touch()
if Path(my_file).exists():
size = os.path.getsize(my_file)
if size > 100:
os.remove(my_file)
Path(my_file).touch()
elif size == 0:
time_diff = file_end - file_start
                    txt_content = (
                        fcsv
                        + " start time:"
                        + str(file_start)
                        + " end time:"
                        + str(file_end)
                        + " duration:"
+ str(time_diff)
)
txt_list.append(txt_content)
for i, val in enumerate(txt_list):
f = open(my_file, "a")
f.write((str(val) + "\r"))
f.close()
elif flag == 1:
slp_flist = os.listdir(src_slp)
psg_flist = os.listdir(src_psg)
slp_idList = [i.split(".")[0].split("_")[0] for i in slp_flist]
psg_idList = [i.split(".")[0].split("_")[0].lstrip("0") for i in psg_flist]
txt_list = []
my_file = src_txt + "setime.txt"
for i, fcsv in enumerate(slp_flist):
j = psg_idList.index(slp_idList[i])
simg_name = fcsv.split(".")[0]
data_slp = pd.read_csv(src_slp + fcsv)
data_psg = pd.read_csv(src_psg + psg_flist[j])
data_slp.columns = ["time", "hr", "br"]
data_psg.columns = ["time", "pr", "rr"]
time_set = [
data_slp["time"].tolist()[0],
hour2stamp(data_psg["time"].tolist()[0]),
data_slp["time"].tolist()[-1],
hour2stamp(data_psg["time"].tolist()[-1]),
]
start_time = time_set[0] - time_set[1]
end_time = time_set[2] - time_set[3]
if start_time < 0:
file_start = time_set[1]
else:
file_start = time_set[0]
if end_time < 0:
file_end = time_set[2]
else:
file_end = time_set[3]
print(time_set[1], time_set[0])
            data_psg["timestamp"] = data_psg["time"].apply(hour2stamp)
            print(
                "interval start:",
                file_start,
                "interval end:",
                file_end,
                "common interval length:",
                (file_end - file_start),
            )
slp_sind = data_slp[data_slp["time"] == file_start].index.tolist()[0]
slp_eind = data_slp[data_slp["time"] == file_end].index.tolist()[0]
slp_clist = data_slp[slp_sind : slp_eind + 1]
psg_sind = data_psg[data_psg["timestamp"] == file_start].index.tolist()[0]
psg_eind = data_psg[data_psg["timestamp"] == file_end].index.tolist()[0]
psg_clist = data_psg[psg_sind : psg_eind + 1]
hr_time, hr_list = minute_mean(slp_clist, "hr", file_start)
br_time, br_list = minute_mean(slp_clist, "br", file_start)
pr_time, pr_list = minute_mean(psg_clist, "pr", file_start)
rr_time, rr_list = minute_mean(psg_clist, "rr", file_start)
rslt_slp = {"time": hr_time, "hr": hr_list, "br": br_list}
clean_slp = pd.DataFrame(data=rslt_slp)
rslt_psg = {"time": pr_time, "pr": pr_list, "rr": rr_list}
clean_psg = pd.DataFrame(data=rslt_psg)
time = clean_slp["time"]
HR = clean_slp["hr"]
PR = clean_psg["pr"]
BR = clean_slp["br"]
RR = clean_psg["rr"]
acc_hr = Heart_rate_accuracy_calculat(PR, HR, src_txt, fcsv)
acc_br = Respiration_rate_accuracy_calculat(RR, BR, src_txt, fcsv)
time_offset = [datetime.fromtimestamp(i) for i in time]
            # prepare the raw SLP heart-rate and respiration data
slp_hr = pd.Series(list(HR), index=time_offset)
slp_br = pd.Series(list(BR), index=time_offset)
if len(time_offset) > 0:
acc_flag = 0
                if acc_hr is not None and acc_br is not None and (acc_hr < 0.9 or acc_br < 0.9):
acc_flag = 1
draw_PR_RR_save(
PR,
RR,
slp_hr,
slp_br,
time_offset,
src_img,
simg_name,
acc_flag,
)
else:
draw_PR_RR_save(
PR,
RR,
slp_hr,
slp_br,
time_offset,
src_img,
simg_name,
acc_flag,
)
if Path(my_file).is_file() is False:
Path(my_file).touch()
if Path(my_file).exists():
size = os.path.getsize(my_file)
if size > 100:
os.remove(my_file)
Path(my_file).touch()
elif size == 0:
time_diff = file_end - file_start
                    txt_content = (
                        fcsv
                        + " start time:"
                        + str(file_start)
                        + " end time:"
                        + str(file_end)
                        + " duration:"
+ str(time_diff)
)
txt_list.append(txt_content)
for i, val in enumerate(txt_list):
f = open(my_file, "a")
f.write((str(val) + "\r"))
f.close()
def psg_rr_transfrom(cat_dir, save_dir):
    # convert PSG batch simulation output into csv files
flist = os.listdir(cat_dir + "br_sec/")
for fcsv in flist[:]:
fname = fcsv.split(".")[0]
br_list = read_bytefile(cat_dir, "br_sec/", fcsv, mode="u8")
startstamp = int(fcsv.split("_")[-1].split(".")[0])
time_list = [startstamp + t for t in range(len(br_list))]
rslt = {"time": time_list, "breath_rate": br_list}
df = | pd.DataFrame(data=rslt) | pandas.DataFrame |
import os
import glob
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def load_dataset_file(dataset_file_path):
"""
This method loads dataset file.
Currently supported formats are .csv and .json.
:param dataset_file_path: path of the dataset file
:return: pandas dataframe that contains dataset information
"""
_, file_extension = os.path.splitext(dataset_file_path)
if file_extension == ".csv":
data_info = pd.read_csv(dataset_file_path)
elif file_extension == ".json":
data_info = pd.read_json(dataset_file_path)
else:
raise ValueError("Unknown file type: {0}".format(file_extension))
return data_info
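# Editor's note: an editor-added usage sketch; "dataset.csv" is a placeholder path.
# load_dataset_file simply dispatches on the extension to pandas read_csv / read_json.
def _demo_load_dataset_file(path="dataset.csv"):
    info = load_dataset_file(path)
    print(info.columns.tolist(), len(info))
    return info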
def create_dataset(data_template, mask_template=None, classification=False, save_dataset_file=False, output_dataset_file_path=""):
"""
This method creates dataset file that contains columns with paths of the data files, group names and optionally paths of mask files and class names
under the assumption that corresponding files will have the same index after sorting.
Data paths column will have name: "image".
Mask paths column will have name: "mask".
Basename column ("image_basename") will be created.
Group column with name "group" will be created to distinguish between different groups of data and will contain name of the parent directory
(in classification case second degree parent directory) of the data file.
In classification case class column with name "class" will be created and will contain name of the parent directory of the data file
:param data_template: data template that are readable by glob
:param mask_template: mask template that are readable by glob
:param classification: if True classification dataset file will be created ("class" column will be added), if False segmentation dataset will be created
:param save_dataset_file: if True dataset file will be saved in .csv format
:param output_dataset_file_path: path of the output dataset file
:return: pandas dataframe that contains paths of the files in directories
"""
data_info = | pd.DataFrame() | pandas.DataFrame |
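# Editor's note: the create_dataset body above is truncated at the record boundary. Below
# is a minimal, editor-added sketch of the glob-based path collection its docstring
# describes; the column names follow the docstring ("image", "image_basename", "group"),
# everything else is an assumption, not the original implementation.
import glob
import os
import pandas as pd

def _sketch_collect_paths(data_template):
    data_paths = sorted(glob.glob(data_template))
    return pd.DataFrame(
        {
            "image": data_paths,
            "image_basename": [os.path.basename(p) for p in data_paths],
            "group": [os.path.basename(os.path.dirname(p)) for p in data_paths],
        }
    )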
#!/usr/bin/env python
# coding=utf-8
"""
@version: 0.1
@author: li
@file: factor_revenue_quality.py
@time: 2019-01-28 11:33
"""
import gc, six
import sys
sys.path.append("../")
sys.path.append("../../")
sys.path.append("../../../")
import numpy as np
import pandas as pd
import json
from pandas.io.json import json_normalize
from utilities.calc_tools import CalcTools
from utilities.singleton import Singleton
# from basic_derivation import app
# from ultron.cluster.invoke.cache_data import cache_data
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
@six.add_metaclass(Singleton)
class FactorRevenueQuality(object):
"""
    Revenue quality
    """
    def __init__(self):
        __str__ = 'factor_revenue_quality'
        self.name = 'Financial indicators'
        self.factor_type1 = 'Financial indicators'
        self.factor_type2 = 'Revenue quality'
        self.description = 'Second-level financial indicator: revenue quality'
@staticmethod
def NetNonOIToTP(tp_revenue_quanlity, revenue_quality, dependencies=['total_profit', 'non_operating_revenue', 'non_operating_expense']):
"""
        :name: net non-operating income and expenses / total profit
        :desc: net non-operating income and expenses / total profit
:unit:
:view_dimension: 0.01
"""
earning = tp_revenue_quanlity.loc[:, dependencies]
earning['NetNonOIToTP'] = np.where(
CalcTools.is_zero(earning.total_profit.values), 0,
(earning.non_operating_revenue.values +
earning.non_operating_expense.values)
/ earning.total_profit.values
)
earning = earning.drop(dependencies, axis=1)
revenue_quality = pd.merge(revenue_quality, earning, how='outer', on="security_code")
return revenue_quality
@staticmethod
def NetNonOIToTPTTM(ttm_revenue_quanlity, revenue_quality, dependencies=['total_profit', 'non_operating_revenue', 'non_operating_expense']):
"""
        :name: net non-operating income and expenses (TTM) / total profit (TTM)
        :desc: net non-operating income and expenses (TTM) / total profit (TTM)
:unit:
:view_dimension: 0.01
"""
earning = ttm_revenue_quanlity.loc[:, dependencies]
earning['NetNonOIToTPTTM'] = np.where(
CalcTools.is_zero(earning.total_profit.values), 0,
(earning.non_operating_revenue.values +
earning.non_operating_expense.values)
/ earning.total_profit.values
)
earning = earning.drop(dependencies, axis=1)
revenue_quality = pd.merge(revenue_quality, earning, how='outer', on="security_code")
return revenue_quality
@staticmethod
def OperatingNIToTPTTM(ttm_revenue_quanlity, revenue_quality, dependencies=['total_operating_revenue', 'total_operating_cost', 'total_profit']):
"""
        :name: net operating income / total profit (TTM)
        :desc: net operating income (TTM) / total profit (TTM) * 100% (note: for non-financial companies, net operating income = total operating revenue - total operating cost; for financial companies, net operating income = operating revenue - fair-value change gains - investment income - FX gains - operating expenses; computed here with the non-financial formula)
:unit:
:view_dimension: 0.01
"""
earning = ttm_revenue_quanlity.loc[:, dependencies]
earning['OperatingNIToTPTTM'] = np.where(
CalcTools.is_zero(earning.total_profit.values), 0,
(earning.total_operating_revenue.values -
earning.total_operating_cost.values)
/ earning.total_profit.values)
earning = earning.drop(dependencies, axis=1)
revenue_quality = pd.merge(revenue_quality, earning, how='outer', on="security_code")
return revenue_quality
@staticmethod
def OperatingNIToTP(tp_revenue_quanlity, revenue_quality, dependencies=['total_operating_revenue', 'total_operating_cost', 'total_profit']):
"""
        :name: net operating income / total profit
        :desc: (note: for non-financial companies, net operating income = total operating revenue - total operating cost; for financial companies, net operating income = operating revenue - fair-value change gains - investment income - FX gains - operating expenses; computed here with the non-financial formula)
:unit:
:view_dimension: 0.01
"""
earning = tp_revenue_quanlity.loc[:, dependencies]
earning['OperatingNIToTP'] = np.where(
CalcTools.is_zero(earning.total_profit.values), 0,
(earning.total_operating_revenue.values -
earning.total_operating_cost.values)
/ earning.total_profit.values)
earning = earning.drop(dependencies, axis=1)
revenue_quality = pd.merge(revenue_quality, earning, how='outer', on="security_code")
return revenue_quality
@staticmethod
def OptCFToCurrLiabilityTTM(ttm_revenue_quanlity, revenue_quality, dependencies=['net_operate_cash_flow_indirect', 'total_current_liability']):
"""
        :name: net cash flow from operating activities (TTM) / current liabilities (TTM)
        :desc: net cash flow from operating activities (TTM) / current liabilities (TTM)
:unit:
:view_dimension: 0.01
"""
cash_flow = ttm_revenue_quanlity.loc[:, dependencies]
cash_flow['OptCFToCurrLiabilityTTM'] = np.where(
CalcTools.is_zero(cash_flow.total_current_liability.values), 0,
cash_flow.net_operate_cash_flow_indirect.values / cash_flow.total_current_liability.values)
cash_flow = cash_flow.drop(dependencies, axis=1)
revenue_quality = pd.merge(revenue_quality, cash_flow, how='outer', on="security_code")
return revenue_quality
@staticmethod
def NetInToTPTTM(ttm_revenue_quanlity, revenue_quality, dependencies=['fair_value_variable_income',
'total_profit']):
"""
        :name: net gains from changes in value / total profit (TTM)
        :desc: net gains from changes in value (TTM) / total profit (TTM) (computed from fair-value change gains)
:unit:
:view_dimension: 0.01
"""
historical_value = ttm_revenue_quanlity.loc[:, dependencies]
func = lambda x: x[0] / x[1] if x[1] != 0 and x[1] is not None and x[0] is not None else None
historical_value['NetInToTPTTM'] = historical_value.apply(func, axis=1)
historical_value = historical_value.drop(dependencies, axis=1)
revenue_quality = pd.merge(revenue_quality, historical_value, how='outer', on='security_code')
return revenue_quality
@staticmethod
def OPToTPTTM(ttm_revenue_quanlity, revenue_quality,
dependencies=['operating_profit', 'total_profit']):
"""
        :name: operating profit / total profit (TTM)
        :desc: operating profit (TTM) / total profit (TTM)
:unit:
:view_dimension: 0.01
"""
historical_value = ttm_revenue_quanlity.loc[:, dependencies]
func = lambda x: x[0] / x[1] if x[1] is not None and x[1] != 0 else None
historical_value['OPToTPTTM'] = historical_value.apply(func, axis=1)
historical_value = historical_value.drop(dependencies, axis=1)
revenue_quality = pd.merge(revenue_quality, historical_value, how='outer', on='security_code')
return revenue_quality
@staticmethod
def PriceToRevRatioTTM(ttm_revenue_quanlity, revenue_quality,
dependencies=['net_profit', 'market_cap']):
"""
        :name: earnings-to-market-cap ratio (TTM)
        :desc: net profit (TTM) / market capitalization (TTM)
:unit:
:view_dimension: 0.01
"""
historical_value = ttm_revenue_quanlity.loc[:, dependencies]
func = lambda x: x[0] / x[1] if x[1] is not None and x[1] != 0 else None
historical_value['PriceToRevRatioTTM'] = historical_value[dependencies].apply(func, axis=1)
historical_value = historical_value.drop(dependencies, axis=1)
revenue_quality = pd.merge(revenue_quality, historical_value, how='outer', on='security_code')
return revenue_quality
@staticmethod
def PftMarginTTM(ttm_revenue_quanlity, revenue_quality,
dependencies=['total_profit', 'operating_revenue']):
"""
        :name: profit margin (TTM)
        :desc: total profit (TTM) / operating revenue (TTM)
:unit:
:view_dimension: 0.01
"""
historical_value = ttm_revenue_quanlity.loc[:, dependencies]
func = lambda x: x[0] / x[1] if x[1] is not None and x[1] != 0 else None
historical_value['PftMarginTTM'] = historical_value[dependencies].apply(func, axis=1)
historical_value = historical_value.drop(dependencies, axis=1)
revenue_quality = | pd.merge(revenue_quality, historical_value, how='outer', on='security_code') | pandas.merge |
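# Editor's note: an editor-added sketch of the zero-guarded ratio pattern the factor
# methods above rely on (CalcTools.is_zero plus np.where); plain numpy/pandas is used so
# the snippet stands alone, and the column names are illustrative only.
import numpy as np
import pandas as pd

def _sketch_safe_ratio(df, num_col="non_operating_net", den_col="total_profit"):
    num = df[num_col].to_numpy(dtype=float)
    den = df[den_col].to_numpy(dtype=float)
    zero = np.isclose(den, 0.0)
    # return 0 where the denominator is (close to) zero, the ratio elsewhere,
    # substituting 1.0 into guarded positions so no divide-by-zero warning is raised
    ratio = np.where(zero, 0.0, num / np.where(zero, 1.0, den))
    return pd.Series(ratio, index=df.index)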
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
        # missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assert_((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assert_((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEquals(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEquals(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
with assertRaisesRegexp(TypeError, 'hierarchical index'):
self.frame.reset_index()['A'].sortlevel()
# preserve names
self.assertEquals(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int32)
# it works!
result = df.sortlevel(0)
self.assert_((result.dtypes.values == df.dtypes.values).all() == True)
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple for tuple in cart_product(['foo', 'bar'],
[10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples,
names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assert_(com.is_integer_dtype(deleveled['prm1']))
self.assert_(com.is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEquals(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assert_isinstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assert_isinstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count(axis=axis)
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
assert_almost_equal(result.columns, ['A', 'B', 'C'])
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'],
['one', 'two', 'three', 'four']],
labels=[[0, 0, 0, 2, 2],
[2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0])
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked2 = unstacked.unstack()
# test that ints work
unstacked = self.ymd.astype(int).unstack()
# test that int32 work
unstacked = self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0),
(1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sortlevel(2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).ix[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
expected = self.ymd.unstack(0).stack(0)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sortlevel(1, axis=1)
stacked = df.stack()
assert_series_equal(stacked['foo'], df['foo'].stack())
self.assertEqual(stacked['bar'].dtype, np.float_)
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive',
'activ', 'activ', 'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
assert_series_equal(restacked,
result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
self.assertEquals(unstacked.index.name, 'first')
self.assertEquals(unstacked.columns.names, ['exp', 'second'])
restacked = unstacked.stack()
self.assertEquals(restacked.index.names, self.frame.index.names)
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
assert_frame_equal(unstacked, expected)
self.assertEquals(unstacked.columns.names,
expected.columns.names)
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sortlevel(0)
assert_frame_equal(restacked, self.ymd)
self.assertEquals(restacked.index.names, self.ymd.index.names)
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected.ix[:, unstacked.columns])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
'2013-11', '2013-12'], freq='M', name='period2')
expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan]],
index=e_idx, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH 4342
idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02', '2014-01', '2014-01'],
freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10', '2013-10', '2014-02'],
freq='M', name='period2')
value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
'2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
index=e_1, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
'2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A B B'.split(), e_1])
expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
index=e_2, columns=e_cols)
assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
""" bug when some uniques are not present in the data #3170"""
id_col = ([1] * 3) + ([2] * 3)
name = (['a'] * 3) + (['b'] * 3)
date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
multi = df.set_index(['DATE', 'ID'])
multi.columns.name = 'Params'
unst = multi.unstack('ID')
down = unst.resample('W-THU')
rs = down.stack('ID')
xp = unst.ix[:, ['VAR1']].resample('W-THU').stack('ID')
xp.columns.name = 'Params'
assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH #3997
df = pd.DataFrame({'A': ['a1', 'a2'],
'B': ['b1', 'b2'],
'C': [1, 1]})
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
self.assertTrue(len(stacked) > len(stacked.dropna()))
stacked = df.unstack().stack(dropna=True)
assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]])
df.index.names = ['a', 'b', 'c']
df.columns.names = ['d', 'e']
# it works!
df.unstack(['b', 'c'])
def test_groupby_transform(self):
s = self.frame['A']
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
assert_series_equal(applied.reindex(expected.index), expected)
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl #2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
'B': np.random.randint(300, size=NUM_ROWS),
'C': np.random.randint(-7, 7, size=NUM_ROWS),
'D': np.random.randint(-19, 19, size=NUM_ROWS),
'E': np.random.randint(3000, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
idf = df.set_index(['A', 'B', 'C', 'D', 'E'])
# it works! is sufficient
idf.unstack('E')
def test_unstack_unobserved_keys(self):
# related to #2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, labels)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
self.assertEquals(len(result.columns), 4)
recons = result.stack()
assert_frame_equal(recons, df)
def test_groupby_corner(self):
midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
labels=[[0], [0], [0]], names=['one', 'two', 'three'])
df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
index=midx)
# should work
df.groupby(level='three')
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'),
('f2', 's1'), ('f2', 's2'),
('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.select(lambda u: u[0] in ['f2', 'f3'], axis=1)
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
self.assert_((result.columns == ['f2', 'f3']).all())
def test_join(self):
a = self.frame.ix[:5, ['A']]
b = self.frame.ix[2:, ['B', 'C']]
joined = a.join(b, how='outer').reindex(self.frame.index)
expected = self.frame.copy()
expected.values[np.isnan(joined.values)] = np.nan
self.assert_(not np.isnan(joined.values).all())
assert_frame_equal(joined, expected, check_names=False) # TODO what should join do with names ?
def test_swaplevel(self):
swapped = self.frame['A'].swaplevel(0, 1)
swapped2 = self.frame['A'].swaplevel('first', 'second')
self.assert_(not swapped.index.equals(self.frame.index))
assert_series_equal(swapped, swapped2)
back = swapped.swaplevel(0, 1)
back2 = swapped.swaplevel('second', 'first')
self.assert_(back.index.equals(self.frame.index))
assert_series_equal(back, back2)
ft = self.frame.T
swapped = ft.swaplevel('first', 'second', axis=1)
exp = self.frame.swaplevel('first', 'second').T
assert_frame_equal(swapped, exp)
def test_swaplevel_panel(self):
panel = Panel({'ItemA': self.frame,
'ItemB': self.frame * 2})
result = panel.swaplevel(0, 1, axis='major')
expected = panel.copy()
expected.major_axis = expected.major_axis.swaplevel(0, 1)
tm.assert_panel_equal(result, expected)
def test_reorder_levels(self):
result = self.ymd.reorder_levels(['month', 'day', 'year'])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
assert_frame_equal(result, expected)
result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
assert_series_equal(result, expected)
result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
assert_frame_equal(result, expected)
with assertRaisesRegexp(TypeError, 'hierarchical axis'):
self.ymd.reorder_levels([1, 2], axis=1)
with assertRaisesRegexp(IndexError, 'Too many levels'):
self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
tm.assert_isinstance(df.columns, MultiIndex)
self.assert_((df[2000, 1, 10] == df[2000, 1, 7]).all())
def test_alignment(self):
x = Series(data=[1, 2, 3],
index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)]))
y = Series(data=[4, 5, 6],
index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)]))
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]])
self.assert_(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 2, 1]])
self.assert_(not index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 1, 0, 1, 1],
[0, 1, 0, 2, 2, 1]])
self.assert_(not index.is_lexsorted())
self.assertEqual(index.lexsort_depth, 0)
def test_frame_getitem_view(self):
df = self.frame.T.copy()
# this works because we are modifying the underlying array
# really a no-no
df['foo'].values[:] = 0
self.assert_((df['foo'].values == 0).all())
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
df = df.sortlevel(0, axis=1)
# this will work, but will raise/warn as its chained assignment
def f():
df['foo']['one'] = 2
return df
self.assertRaises(com.SettingWithCopyError, f)
try:
df = f()
except:
pass
self.assert_((df['foo', 'one'] == 0).all())
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns._tuple_index)]
result = df['foo']
result2 = df.ix[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.ix['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
arrays = [np.array(x) for x in zip(*index._tuple_index)]
result = s['qux']
result2 = s.ix['qux']
expected = s[arrays[0] == 'qux']
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_count(self):
frame = self.frame.copy()
frame.index.names = ['a', 'b']
result = frame.count(level='b')
expect = self.frame.count(level=1)
assert_frame_equal(result, expect, check_names=False)
result = frame.count(level='a')
expect = self.frame.count(level=0)
assert_frame_equal(result, expect, check_names=False)
series = self.series.copy()
series.index.names = ['a', 'b']
result = series.count(level='b')
expect = self.series.count(level=1)
assert_series_equal(result, expect)
result = series.count(level='a')
expect = self.series.count(level=0)
assert_series_equal(result, expect)
self.assertRaises(KeyError, series.count, 'x')
self.assertRaises(KeyError, frame.count, level='x')
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var']
def test_series_group_min_max(self):
for op, level, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2),
[False, True]):
grouped = self.series.groupby(level=level)
aggf = lambda x: getattr(x, op)(skipna=skipna)
# skipna=True
leftside = grouped.agg(aggf)
rightside = getattr(self.series, op)(level=level, skipna=skipna)
assert_series_equal(leftside, rightside)
def test_frame_group_ops(self):
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2), lrange(2),
[False, True]):
if axis == 0:
frame = self.frame
else:
frame = self.frame.T
grouped = frame.groupby(level=level, axis=axis)
pieces = []
def aggf(x):
pieces.append(x)
return getattr(x, op)(skipna=skipna, axis=axis)
leftside = grouped.agg(aggf)
rightside = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level]
self.assert_(leftside._get_axis(axis).equals(level_index))
self.assert_(rightside._get_axis(axis).equals(level_index))
assert_frame_equal(leftside, rightside)
def test_stat_op_corner(self):
obj = Series([10.0], index=MultiIndex.from_tuples([(2, 3)]))
result = obj.sum(level=0)
expected = Series([10.0], index=[2])
assert_series_equal(result, expected)
def test_frame_any_all_group(self):
df = DataFrame(
{'data': [False, False, True, False, True, False, True]},
index=[
['one', 'one', 'two', 'one', 'two', 'two', 'two'],
[0, 1, 0, 2, 1, 2, 3]])
result = df.any(level=0)
ex = DataFrame({'data': [False, True]}, index=['one', 'two'])
assert_frame_equal(result, ex)
result = df.all(level=0)
ex = DataFrame({'data': [False, False]}, index=['one', 'two'])
assert_frame_equal(result, ex)
def test_std_var_pass_ddof(self):
index = MultiIndex.from_arrays([np.arange(5).repeat(10),
np.tile(np.arange(10), 5)])
df = DataFrame(np.random.randn(len(index), 5), index=index)
for meth in ['var', 'std']:
ddof = 4
alt = lambda x: getattr(x, meth)(ddof=ddof)
result = getattr(df[0], meth)(level=0, ddof=ddof)
expected = df[0].groupby(level=0).agg(alt)
assert_series_equal(result, expected)
result = getattr(df, meth)(level=0, ddof=ddof)
expected = df.groupby(level=0).agg(alt)
assert_frame_equal(result, expected)
def test_frame_series_agg_multiple_levels(self):
result = self.ymd.sum(level=['year', 'month'])
expected = self.ymd.groupby(level=['year', 'month']).sum()
assert_frame_equal(result, expected)
result = self.ymd['A'].sum(level=['year', 'month'])
expected = self.ymd['A'].groupby(level=['year', 'month']).sum()
assert_series_equal(result, expected)
def test_groupby_multilevel(self):
result = self.ymd.groupby(level=[0, 1]).mean()
k1 = self.ymd.index.get_level_values(0)
k2 = self.ymd.index.get_level_values(1)
expected = self.ymd.groupby([k1, k2]).mean()
assert_frame_equal(result, expected, check_names=False) # TODO groupby with level_values drops names
self.assertEquals(result.index.names, self.ymd.index.names[:2])
result2 = self.ymd.groupby(level=self.ymd.index.names[:2]).mean()
assert_frame_equal(result, result2)
def test_groupby_multilevel_with_transform(self):
pass
def test_multilevel_consolidate(self):
index = MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('bar', 'two')])
df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
df['Totals', ''] = df.sum(1)
df = df.consolidate()
def test_ix_preserve_names(self):
result = self.ymd.ix[2000]
result2 = self.ymd['A'].ix[2000]
self.assertEquals(result.index.names, self.ymd.index.names[1:])
self.assertEquals(result2.index.names, self.ymd.index.names[1:])
result = self.ymd.ix[2000, 2]
result2 = self.ymd['A'].ix[2000, 2]
self.assertEquals(result.index.name, self.ymd.index.names[2])
self.assertEquals(result2.index.name, self.ymd.index.names[2])
def test_partial_set(self):
# GH #397
df = self.ymd.copy()
exp = self.ymd.copy()
df.ix[2000, 4] = 0
exp.ix[2000, 4].values[:] = 0
assert_frame_equal(df, exp)
df['A'].ix[2000, 4] = 1
exp['A'].ix[2000, 4].values[:] = 1
assert_frame_equal(df, exp)
df.ix[2000] = 5
exp.ix[2000].values[:] = 5
assert_frame_equal(df, exp)
# this works...for now
df['A'].ix[14] = 5
self.assertEquals(df['A'][14], 5)
def test_unstack_preserve_types(self):
# GH #403
self.ymd['E'] = 'foo'
self.ymd['F'] = 2
unstacked = self.ymd.unstack('month')
self.assertEqual(unstacked['A', 1].dtype, np.float64)
self.assertEqual(unstacked['E', 1].dtype, np.object_)
self.assertEqual(unstacked['F', 1].dtype, np.float64)
def test_unstack_group_index_overflow(self):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
index = MultiIndex(levels=[level] * 8 + [[0, 1]],
labels=[labels] * 8 + [np.arange(2).repeat(500)])
s = Series(np.arange(1000), index=index)
result = s.unstack()
self.assertEqual(result.shape, (500, 2))
# test roundtrip
stacked = result.stack()
assert_series_equal(s,
stacked.reindex(s.index))
# put it at beginning
index = MultiIndex(levels=[[0, 1]] + [level] * 8,
labels=[np.arange(2).repeat(500)] + [labels] * 8)
s = Series(np.arange(1000), index=index)
result = s.unstack(0)
self.assertEqual(result.shape, (500, 2))
# put it in middle
index = MultiIndex(levels=[level] * 4 + [[0, 1]] + [level] * 4,
labels=([labels] * 4 + [np.arange(2).repeat(500)]
+ [labels] * 4))
s = Series(np.arange(1000), index=index)
result = s.unstack(4)
self.assertEqual(result.shape, (500, 2))
def test_getitem_lowerdim_corner(self):
self.assertRaises(KeyError, self.frame.ix.__getitem__,
(('bar', 'three'), 'B'))
# in theory should be inserting in a sorted space????
self.frame.ix[('bar','three'),'B'] = 0
self.assertEqual(self.frame.sortlevel().ix[('bar','three'),'B'], 0)
#----------------------------------------------------------------------
# AMBIGUOUS CASES!
def test_partial_ix_missing(self):
raise nose.SkipTest("skipping for now")
result = self.ymd.ix[2000, 0]
expected = self.ymd.ix[2000]['A']
assert_series_equal(result, expected)
# need to put in some work here
# self.ymd.ix[2000, 0] = 0
# self.assert_((self.ymd.ix[2000]['A'] == 0).all())
# Pretty sure the second (and maybe even the first) is already wrong.
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6))
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6), 0)
#----------------------------------------------------------------------
def test_to_html(self):
self.ymd.columns.name = 'foo'
self.ymd.to_html()
self.ymd.T.to_html()
def test_level_with_tuples(self):
index = MultiIndex(levels=[[('foo', 'bar', 0), ('foo', 'baz', 0),
('foo', 'qux', 0)],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar', 0)]
result2 = series.ix[('foo', 'bar', 0)]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertRaises(KeyError, series.__getitem__, (('foo', 'bar', 0), 2))
result = frame.ix[('foo', 'bar', 0)]
result2 = frame.xs(('foo', 'bar', 0))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
index = MultiIndex(levels=[[('foo', 'bar'), ('foo', 'baz'),
('foo', 'qux')],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar')]
result2 = series.ix[('foo', 'bar')]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = frame.ix[('foo', 'bar')]
result2 = frame.xs(('foo', 'bar'))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_int_series_slicing(self):
s = self.ymd['A']
result = s[5:]
expected = s.reindex(s.index[5:])
assert_series_equal(result, expected)
exp = self.ymd['A'].copy()
s[5:] = 0
exp.values[5:] = 0
self.assert_numpy_array_equal(s.values, exp.values)
result = self.ymd[5:]
expected = self.ymd.reindex(s.index[5:])
assert_frame_equal(result, expected)
def test_mixed_depth_get(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df['a']
expected = df['a', '', '']
assert_series_equal(result, expected)
self.assertEquals(result.name, 'a')
result = df['routine1', 'result1']
expected = df['routine1', 'result1', '']
assert_series_equal(result, expected)
self.assertEquals(result.name, ('routine1', 'result1'))
def test_mixed_depth_insert(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.copy()
expected = df.copy()
result['b'] = [1, 2, 3, 4]
expected['b', '', ''] = [1, 2, 3, 4]
assert_frame_equal(result, expected)
def test_mixed_depth_drop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.drop('a', axis=1)
expected = df.drop([('a', '', '')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(['top'], axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
expected = expected.drop([('top', 'OD', 'wy')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(('top', 'OD', 'wx'), axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
assert_frame_equal(expected, result)
expected = df.drop([('top', 'OD', 'wy')], axis=1)
expected = df.drop('top', axis=1)
result = df.drop('result1', level=1, axis=1)
expected = df.drop([('routine1', 'result1', ''),
('routine2', 'result1', '')], axis=1)
assert_frame_equal(expected, result)
def test_drop_nonunique(self):
df = DataFrame([["x-a", "x", "a", 1.5], ["x-a", "x", "a", 1.2],
["z-c", "z", "c", 3.1], ["x-a", "x", "a", 4.1],
["x-b", "x", "b", 5.1], ["x-b", "x", "b", 4.1],
["x-b", "x", "b", 2.2],
["y-a", "y", "a", 1.2], ["z-b", "z", "b", 2.1]],
columns=["var1", "var2", "var3", "var4"])
grp_size = df.groupby("var1").size()
drop_idx = grp_size.ix[grp_size == 1]
idf = df.set_index(["var1", "var2", "var3"])
# it works! #2101
result = idf.drop(drop_idx.index, level=0).reset_index()
expected = df[-df.var1.isin(drop_idx.index)]
result.index = expected.index
assert_frame_equal(result, expected)
def test_mixed_depth_pop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
df1 = df.copy()
df2 = df.copy()
result = df1.pop('a')
expected = df2.pop(('a', '', ''))
assert_series_equal(expected, result)
assert_frame_equal(df1, df2)
self.assertEquals(result.name, 'a')
expected = df1['top']
df1 = df1.drop(['top'], axis=1)
result = df2.pop('top')
assert_frame_equal(expected, result)
assert_frame_equal(df1, df2)
def test_reindex_level_partial_selection(self):
result = self.frame.reindex(['foo', 'qux'], level=0)
expected = self.frame.ix[[0, 1, 2, 7, 8, 9]]
assert_frame_equal(result, expected)
result = self.frame.T.reindex_axis(['foo', 'qux'], axis=1, level=0)
assert_frame_equal(result, expected.T)
result = self.frame.ix[['foo', 'qux']]
assert_frame_equal(result, expected)
result = self.frame['A'].ix[['foo', 'qux']]
assert_series_equal(result, expected['A'])
# coding=utf-8
import pandas as pd
import numpy as np
import re
from matplotlib.ticker import FuncFormatter
def number_formatter(number, pos=None):
"""Convert a number into a human readable format."""
magnitude = 0
while abs(number) >= 1000:
magnitude += 1
number /= 1000.0
return '%.1f%s' % (number, ['', 'K', 'M', 'B', 'T', 'Q'][magnitude])
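# Hedged usage sketch (not part of the original module): number_formatter is written to be
# plugged into matplotlib as a tick formatter, and can also be called directly, e.g.
#
#   ax.yaxis.set_major_formatter(FuncFormatter(number_formatter))
#   number_formatter(1234567)   # -> '1.2M'
#
# The axis object `ax` above is an assumption for illustration.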
def cuenta_tipo_de_dato(df,tipo):
"""
Esta función crea la tabla con información sobre la cantidad de cada tipo de dato encontrado en el csv
==========
* Args:
- df: el data frame al que se le va a realizar el conteo del tipo de dato.
- tipo: El nombre del tipo de dato que estamos buscando.
* Return:
- Data Frame: entrega el data frame con los la categoría de la columna RESPUESTA modificada.
==========
Ejemplo:
# Para encontrar el tipo de dato numérico
>>conteo_nuericos = cuenta_tipo_de_dato(df, 'numerico')
# Para encontrar el tipo de dato texto
>>conteo_texto = cuenta_tipo_de_dato(df, 'object')
"""
vars_type = df.dtypes
vars_type = pd.DataFrame(vars_type, columns = ['tipo'])
if tipo == 'numerico':
cantidad_tipo = len(vars_type.loc[vars_type["tipo"] == "int64"])
cantidad_tipo = cantidad_tipo + len(vars_type.loc[vars_type["tipo"] == "float64"])
else:
cantidad_tipo = len(vars_type.loc[vars_type["tipo"] == tipo])
return cantidad_tipo
def cuenta_nulos_por_columnas(df):
"""
Función que realiza una tabla con la cuenta de missing values por columna y obtiene la proporción que estos missing
values representan del total.
==========
* Args:
- df: el data frame al que se le va a realizar el conteo de los nulos por cada columna.
* Return:
- Data Frame: entrega el data frame que indica cuantos elementos nulos fueron encontrados en cada columna.
==========
Ejemplo:
>>faltates_por_columna = cuenta_nulos_por_columnas(df)
"""
valores_nulos = df.isnull().sum()
porcentaje_valores_nulos = 100 * df.isnull().sum() / len(df)
tabla_valores_nulos = pd.concat([valores_nulos, porcentaje_valores_nulos], axis=1)
tabla_valores_nulos_ordenada = tabla_valores_nulos.rename(
columns={0: 'Missing Values', 1: '% del Total'})
tabla_valores_nulos_ordenada = tabla_valores_nulos_ordenada[
tabla_valores_nulos_ordenada.iloc[:, 1] != 0].sort_values(
'% del Total', ascending=False).round(1)
print("El dataframe tiene " + str(df.shape[1]) + " columnas.\n"
"Hay " + str(tabla_valores_nulos_ordenada.shape[0]) +
" columnas que tienen NA's.")
return tabla_valores_nulos_ordenada
def CreaTablaConteoPorcentaje(df, nomColumna, booleanNA):
"""
Esta función crea la tabla con información sobre los conteos y el porcentaje al que corresponden del total de los datos.
==========
* Args:
- df: el data frame completo.
- nomColumna: El nombre de la columna sobre la que se quiere realizar la tabla.
- booleanNA: Indicador booleano que indica si se requiere que se muestren los NA's en la tabla.
* Return:
- Data Frame: entrega el data frame con los la categoría de la columna RESPUESTA modificada.
==========
Ejemplo:
>>df = CreaTablaConteoPorcentaje(df, 'RESPUESTA', True)
"""
df_resultado = df[nomColumna].value_counts(dropna=booleanNA)
df_resultado = pd.DataFrame(data=df_resultado)
df_resultado = df_resultado[nomColumna].map('{:,}'.format)
df_resultado = pd.DataFrame(data=df_resultado)
#obteniendo los porcentajes
df_resultado['porcentaje'] = df[nomColumna].value_counts(dropna=booleanNA, normalize=True).mul(100).round(2).astype(str)+'%'
return df_resultado
def CreaTablaConteoPorcentaje_sin_stringformat(df, nomColumna, booleanNA):
"""
Esta función crea la tabla con información sobre los conteos y el porcentaje al que corresponden del total de los datos.
==========
* Args:
- df: el data frame completo.
- nomColumna: El nombre de la columna sobre la que se quiere realizar la tabla.
- booleanNA: Indicador booleano que indica si se requiere que se muestren los NA's en la tabla.
* Return:
- Data Frame: entrega el data frame con los la categoría de la columna RESPUESTA modificada.
==========
Ejemplo:
>>df = CreaTablaConteoPorcentaje(df, 'RESPUESTA', True)
"""
df_resultado = df[nomColumna].value_counts(dropna=booleanNA)
df_resultado = pd.DataFrame(data=df_resultado)
#obteniendo los porcentajes
df_resultado['porcentaje'] = df[nomColumna].value_counts(dropna=booleanNA, normalize=True).mul(100).round(2).astype(str)+'%'
return df_resultado
def StringLowercase(df):
"""
Función cambiar todos los strings de un dataframe a lowercase
(columnas y observaciones)
==========
* Args:
- df: dataframe al que se desea hacer la modificación.
* Return:
- df: dataframe modificado
==========
Ejemplo:
>>df = StringLowercase(df)
"""
### Columnas
DataFrameColumns = df.columns
for col in DataFrameColumns:
df.rename(columns={col:col.lower()}, inplace=True)
### Observaciones
filtro = df.dtypes == np.object
objects = df.dtypes[filtro]
StringColumns = list(objects.index)
for col in StringColumns:
if col != 'geometry':
df[col] = df[col].str.lower()
return df
def StringAcentos(df):
"""
Función para eliminar acentos, dieresis y eñes de los strings de un
dataframe (columnas y observaciones)
==========
* Args:
- df: dataframe al que se desea hacer la modificación.
* Return:
- df: dataframe modificado
==========
Ejemplo:
>>df = StringAcentos(df)
"""
### Columnas
df.columns = df.columns.str.replace('á', 'a')
df.columns = df.columns.str.replace('é', 'e')
df.columns = df.columns.str.replace('í', 'i')
df.columns = df.columns.str.replace('ó', 'o')
df.columns = df.columns.str.replace('ú', 'u')
df.columns = df.columns.str.replace('ü', 'u')
df.columns = df.columns.str.replace('ñ', 'n')
### Observaciones
filtro = df.dtypes == np.object
objects = df.dtypes[filtro]
StringColumns = list(objects.index)
for col in StringColumns:
if col != 'geometry':
df[col] = df[col].str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode('utf-8')
return df
def StringStrip(df):
"""
Función para eliminar espacios al inicio y al final de los strings de un
dataframe (columnas y observaciones)
==========
* Args:
- df: dataframe al que se desea hacer la modificación.
* Return:
- df: dataframe modificado
==========
Ejemplo:
>>df = StringStrip(df)
"""
### Columnas
df.columns = [col.strip() for col in df.columns]
### Observaciones
filtro = df.dtypes == np.object
objects = df.dtypes[filtro]
StringColumns = list(objects.index)
for col in StringColumns:
if col != 'geometry':
df[col] = df[col].apply(lambda x: x.strip() if isinstance(x, str) else x)
return df
def StringEspacios(df):
"""
Función para eliminar espacios dobles (o mas) de los strings de un
dataframe (columnas y observaciones)
==========
* Args:
- df: dataframe al que se desea hacer la modificación.
* Return:
- df: dataframe modificado
==========
Ejemplo:
>>df = StringEspacios(df)
"""
### Columnas
df.columns = [re.sub(' +', ' ', col) for col in df.columns]
### Observaciones
filtro = df.dtypes == np.object
objects = df.dtypes[filtro]
StringColumns = list(objects.index)
for col in StringColumns:
if col != 'geometry':
df[col] = df[col].apply(lambda x: re.sub(' +', ' ', x) if isinstance(x, str) else x)
return df
def EstandarizaFormato(df):
"""
Función para estandarizar un dataframe: minúsculas, sin espacios en blanco,
sin signos de puntuación (columnas y observaciones)
==========
* Args:
- df: dataframe al que se desea hacer la modificación.
* Return:
- df: dataframe modificado
==========
Ejemplo:
>>df = EstandarizaFormato(df)
"""
### Lowercase
df = StringLowercase(df)
### Accents
df = StringAcentos(df)
### Strip leading and trailing whitespace
df = StringStrip(df)
### Collapse repeated spaces
df = StringEspacios(df)
### Replace spaces in column names with underscores
df.columns = df.columns.str.replace(' ', '_')
return df
def prepara_dataset(df):
"""
Esta función hace las correcciones al dataset.
==========
* Args:
- df: el data frame al que se le van a hacer las correcciones.
* Return:
- Data Frame: entrega el data frame corregido.
==========
Ejemplo:
# Para encontrar el tipo de dato numérico
>>df = prepara_dataset(df)
"""
# Estandarizamos formato
df = EstandarizaFormato(df)
# cambiamos los tipos de variable
df = df.astype({"año_hechos":'category', "mes_hechos":'category', "delito":'category', "categoria_delito":'category',"fiscalia":'category', "agencia":'category'})
# cambiamos la columna geo_point
new = df['geo_point'].str.split(",", n = 1, expand = True)
df["latitud"]= new[0]
df["longitud"]= new[1]
# cambiamos el tipo para latitud y longitud
df = df.astype({"latitud":'float64', "longitud":'float64'})
# Eliminamos la columna geo_point
#df.drop(columns =["geo_point"], inplace = True)
# Eliminamos la columna geo_shape
#df.drop(columns =["geo_shape"], inplace = True)
return df
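# Illustrative sketch (assumption, mirrors the geo_point handling above): the split turns a
# "lat,lon" string into two float columns, e.g.
#
#   tmp = pd.DataFrame({'geo_point': ['19.4326,-99.1332']})
#   partes = tmp['geo_point'].str.split(',', n=1, expand=True)
#   tmp['latitud'] = partes[0].astype('float64')
#   tmp['longitud'] = partes[1].astype('float64')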
def genera_profiling_de_numericos(df,lista_numericas,vars_type):
"""
Función que genera un perfilamiento para los datos numéricos.
==========
* Args:
- df: el data frame al que se le va a realizar el perfilamiento para variables numéricas.
- lista_numericas: una lista con el nombre de las variables que son de tipo numérico.
- vars_type: tabla generada por la función cuenta_tipo_de_dato de este mismo script.
* Return:
- Data Frame: Data Frame con el perfilamiento para las variables numéricas.
==========
Ejemplo:
>>vars_type = cuenta_tipo_de_dato(df)
# Extraemos el nombre de las variables numericas:
>>variables_int = vars_type.loc[vars_type["tipo"] == "int64"]
>>variables_float = vars_type.loc[vars_type["tipo"] == "float64"]
>>variables_numericas = variables_int.append(variables_float, ignore_index=True)
>>lista_numericas = list(variables_numericas['variable'])
# Generamos el perfilamiento para esas variables
>>perfilamiento_de_numericas = genera_profiling_de_numericos(df,lista_numericas,vars_type)
"""
# Obtenemos los estadísticos de la columna si es numérica
lista_perfilamiento_numerico = ['Tipo','Número de observaciones', 'Media', 'Desviación estándar',
'Cuartil 25%','Cuartil 50%','Cuartil 75%','Mínimo','Máximo',
'Número de observaciones únicas','Número de faltantes','Top1/veces/%',
'Top2/veces/%','Top3/veces/%'
,'Top4/veces/%','Top5/veces/%']
datos_dataframe_profiling_numericas = {'Métrica':lista_perfilamiento_numerico}
dataframe_profiling_numericas = pd.DataFrame(data=datos_dataframe_profiling_numericas)
for col in lista_numericas:
# tipo de dato
vars_type_num = pd.DataFrame(vars_type)
#vars_type_num
df_tipo = pd.DataFrame(data=vars_type_num.loc[vars_type_num["variable"] == col])
tipo_dato=df_tipo['tipo'][0]
#print(tipo_dato)
# Obtenemos las métricas relevantes
descr_col = df[col].describe()
descr_col = pd.DataFrame(descr_col)
descr_col['Métrica']=descr_col.index
descr_col.columns=['valor','Métrica']
# número de observaciones
medida = 'count'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
num_observaciones_num = metrica['valor'][0]
#print(num_observaciones_num)
# media
medida = 'mean'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
media_obs_num = metrica['valor'][0]
media_obs_num = media_obs_num.round(2)
#print(media_obs_num)
# desviacion estándar
medida = 'std'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
sd_obs_num = metrica['valor'][0]
sd_obs_num = sd_obs_num.round(2)
#print(sd_obs_num)
# cuartil 25
medida = '25%'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
cuant_25_obs_num = metrica['valor'][0]
cuant_25_obs_num = cuant_25_obs_num.round(2)
#print(cuant_25_obs_num)
# cuartil 50
medida = '50%'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
cuant_50_obs_num = metrica['valor'][0]
cuant_50_obs_num = cuant_50_obs_num.round(2)
#print(cuant_50_obs_num)
#cuant_50_obs_num = agua.quantile(q=0.25)
#print(cuant_50_obs_num)
# cuartil 75
medida = '75%'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
cuant_75_obs_num = metrica['valor'][0]
cuant_75_obs_num = cuant_75_obs_num.round(2)
#print(cuant_75_obs_num)
#cuant_75_obs_num = agua.quantile(q=0.25)
#print(cuant_75_obs_num)
# minimo
medida = 'min'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
minimo_obs_num = metrica['valor'][0]
minimo_obs_num = minimo_obs_num.round(2)
#print(minimo_obs_num)
# maximo
medida = 'max'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
maximo_obs_num = metrica['valor'][0]
maximo_obs_num = maximo_obs_num.round(2)
#print(maximo_obs_num)
# numero de observaciones unicas
num_obs_unicas_obs_num = df[col].nunique()
#print(num_obs_unicas_obs_num)
# Número de observaciones con valores faltantes
obs_faltantes_obs_num = df[col].isna().sum()
# top 5 observaciones repetidas
# df_resultado = df[col].value_counts(dropna=True)
# df_resultado = pd.DataFrame(df_resultado)
# df_resultado.columns=['conteo_top_5']
# df_resultado=df_resultado.sort_values('conteo_top_5', ascending = False)
#top5 = df_resultado.head(5)
#print(top5)
# generamos tabla para las modas
tabla_importantes = CreaTablaConteoPorcentaje(df,str(col),True)
tabla_importantes.columns = ['conteo','porcentaje']
top1 = tabla_importantes.index[0]
veces1 = list(tabla_importantes['conteo'])[0]
porcentaje1 = list(tabla_importantes['porcentaje'])[0]
datos_top1 = [top1,veces1,porcentaje1]
# #datos_top1 = list([tabla_importantes[0:1]])
# lista_perfilamiento_numerico = ['tipo','numero de observaciones', 'media', 'desviacion estándar',
# 'cuartil 25%','cuartil 50%','cuartil 75%','minimo','maximo',
# 'numero de observaciones unicas','top1/veces/porcentaje']
# datos_variable = [tipo_dato,num_observaciones_num,media_obs_num,sd_obs_num,
# cuant_25_obs_num, cuant_50_obs_num,cuant_75_obs_num,minimo_obs_num,
# maximo_obs_num,num_obs_unicas_obs_num,datos_top1]
if(len(tabla_importantes)>1):
top2 = tabla_importantes.index[1]
veces2 = list(tabla_importantes['conteo'])[1]
porcentaje2 = list(tabla_importantes['porcentaje'])[1]
datos_top2 = [top2,veces2,porcentaje2]
# datos_top2 = list([tabla_importantes[1:2]])
# lista_perfilamiento_numerico = ['tipo','numero de observaciones', 'media', 'desviacion estándar',
# 'cuartil 25%','cuartil 50%','cuartil 75%','minimo','maximo',
# 'numero de observaciones unicas','top1/veces/porcentaje',
# 'top2/veces/porcentaje']
# datos_variable = [tipo_dato,num_observaciones_num,media_obs_num,sd_obs_num,
# cuant_25_obs_num, cuant_50_obs_num,cuant_75_obs_num,minimo_obs_num,
# maximo_obs_num,num_obs_unicas_obs_num,datos_top1,datos_top2]
else:
datos_top2 = ['N/A','N/A','N/A']
if(len(tabla_importantes)>2):
top3 = tabla_importantes.index[2]
veces3 = list(tabla_importantes['conteo'])[2]
porcentaje3 = list(tabla_importantes['porcentaje'])[2]
datos_top3 = [top3,veces3,porcentaje3]
# # datos_top3 = list([tabla_importantes[2:3]])
# lista_perfilamiento_numerico = ['tipo','numero de observaciones', 'media', 'desviacion estándar',
# 'cuartil 25%','cuartil 50%','cuartil 75%','minimo','maximo',
# 'numero de observaciones unicas','top1/veces/porcentaje',
# 'top2/veces/porcentaje','top3/veces/porcentaje']
# datos_variable = [tipo_dato,num_observaciones_num,media_obs_num,sd_obs_num,
# cuant_25_obs_num, cuant_50_obs_num,cuant_75_obs_num,minimo_obs_num,
# maximo_obs_num,num_obs_unicas_obs_num,datos_top1,datos_top2,datos_top3]
else:
datos_top3 = ['N/A','N/A','N/A']
if(len(tabla_importantes)>3):
top4 = tabla_importantes.index[3]
veces4 = list(tabla_importantes['conteo'])[3]
porcentaje4 = list(tabla_importantes['porcentaje'])[3]
datos_top4 = [top4,veces4,porcentaje4]
# datos_top4 = list([tabla_importantes[3:4]])
# lista_perfilamiento_numerico = ['tipo','numero de observaciones', 'media', 'desviacion estándar',
# 'cuartil 25%','cuartil 50%','cuartil 75%','minimo','maximo',
# 'numero de observaciones unicas','top1/veces/porcentaje',
# 'top2/veces/porcentaje','top3/veces/porcentaje'
# ,'top4/veces/porcentaje']
# datos_variable = [tipo_dato,num_observaciones_num,media_obs_num,sd_obs_num,
# cuant_25_obs_num, cuant_50_obs_num,cuant_75_obs_num,minimo_obs_num,
# maximo_obs_num,num_obs_unicas_obs_num,datos_top1,datos_top2,datos_top3,
# datos_top4]
else:
datos_top4 = ['N/A','N/A','N/A']
if(len(tabla_importantes)>4):
top5 = tabla_importantes.index[4]
veces5 = list(tabla_importantes['conteo'])[4]
porcentaje5 = list(tabla_importantes['porcentaje'])[4]
datos_top5 = [top5,veces5,porcentaje5]
# datos_top5 = list([tabla_importantes[4:5]])
# lista_perfilamiento_numerico = ['tipo','numero de observaciones', 'media', 'desviacion estándar',
# 'cuartil 25%','cuartil 50%','cuartil 75%','minimo','maximo',
# 'numero de observaciones unicas','top1/veces/porcentaje',
# 'top2/veces/porcentaje','top3/veces/porcentaje'
# ,'top4/veces/porcentaje','top5/veces/porcentaje']
# datos_variable = [tipo_dato,num_observaciones_num,media_obs_num,sd_obs_num,
# cuant_25_obs_num, cuant_50_obs_num,cuant_75_obs_num,minimo_obs_num,
# maximo_obs_num,num_obs_unicas_obs_num,datos_top1,datos_top2,datos_top3,
# datos_top4,datos_top5]
else:
datos_top5 = ['N/A','N/A','N/A']
#print(obs_faltantes_obs_num)
datos_variable = [tipo_dato,num_observaciones_num,media_obs_num,sd_obs_num,
cuant_25_obs_num, cuant_50_obs_num,cuant_75_obs_num,minimo_obs_num,
maximo_obs_num,num_obs_unicas_obs_num,obs_faltantes_obs_num,datos_top1,datos_top2,datos_top3,
datos_top4,datos_top5]
# datos_dataframe_profiling_numericas = {'metrica':lista_perfilamiento_numerico}
# dataframe_profiling_numericas = pd.DataFrame(data=datos_dataframe_profiling_numericas)
dataframe_profiling_numericas[col]=datos_variable
return dataframe_profiling_numericas
def genera_profiling_general(df):
"""
Función que genera la tabla con un perfilamiento general del data set, sin entrar al detalle por variable.
==========
* Args:
- df: el data frame al que se le va a realizar el perfilamiento general.
* Return:
- Data Frame: entrega el data frame con un perfilamiento general del data set.
==========
Ejemplo:
>>perfilamiento_general = genera_profiling_general(df)
"""
cuenta_de_variables = len(df.columns)
cuenta_observaciones = len(df)
total_celdas = cuenta_de_variables*cuenta_observaciones
# Contamos el tipo de datos del dataset
vars_type = df.dtypes
vars_type = pd.DataFrame(vars_type, columns = ['tipo'])
# Asignamos un valor para cada tipo
## Numéricas
cantidad_numericas = len(vars_type.loc[vars_type["tipo"] == "int64"])
cantidad_numericas = cantidad_numericas + len(vars_type.loc[vars_type["tipo"] == "float64"])
#print(cantidad_numericas)
## Fechas
cantidad_fecha = len(vars_type.loc[vars_type["tipo"] == "datetime64[ns]"])
#print(cantidad_fecha)
## Categoricas
cantidad_categoricas = len(vars_type.loc[vars_type["tipo"] == "category"])
#print(cantidad_categoricas)
## Texto
cantidad_texto = len(vars_type.loc[vars_type["tipo"] == "object"])
#print(cantidad_texto)
# Contamos los faltantes
nulos_totales = cuenta_nulos_por_columnas(df)['Missing Values'].sum()
#print(nulos_totales)
# Obtenemos el porcentaje de datos que son faltantes
nulos_porcentaje = ((nulos_totales/(total_celdas))*100).round(1).astype(str)+'%'
#print(nulos_porcentaje)
# Obtenemos el total de columnas duplicadas
ds_duplicados = df.duplicated(subset=None, keep='first')
ds_duplicados = pd.DataFrame(ds_duplicados,columns = ['duplicated'])
numero_de_duplicados = len(ds_duplicados.loc[ds_duplicados["duplicated"] == True])
#print(numero_de_duplicados)
# Obtenemos el porcentaje de duplicados
porcentaje_de_duplicados = str(((numero_de_duplicados/(total_celdas))*100))+'%'
#print(porcentaje_de_duplicados)
estadisticas = ['Total de variables','Conteo de observaciones','Total de celdas',
'Cantidad de variables numéricas','Cantidad de variables de fecha',
'Cantidad de variables categóricas', 'Cantidad de variables de texto',
'Valores faltantes','Porcentaje de valores faltantes',
'Renglones duplicados', 'Porcentaje de valores duplicados']
valores_estadisticas = [cuenta_de_variables,cuenta_observaciones,total_celdas,cantidad_numericas,
cantidad_fecha,cantidad_categoricas,cantidad_texto,nulos_totales,nulos_porcentaje,
numero_de_duplicados,porcentaje_de_duplicados]
valores = {'Estadísticas':estadisticas,'Resultado':valores_estadisticas}
df_perfilamiento_general = pd.DataFrame(data=valores)
return df_perfilamiento_general
def genera_profiling_de_categorias(df, lista_category,vars_type):
"""
Función que genera un perfilamiento para los datos categóricos.
==========
* Args:
- df: el data frame al que se le va a realizar el perfilamiento para variables categóricas.
- lista_category: una lista con el nombre de las variables que son de tipo categórico.
- vars_type: tabla generada por la función cuenta_tipo_de_dato de este mismo script.
* Return:
- Data Frame: Data Frame con el perfilamiento para las variables categóricas.
==========
Ejemplo:
>>vars_type = cuenta_tipo_de_dato(df)
# Extraemos el nombre de las variables categoricas:
>>variables_category = vars_type.loc[vars_type["tipo"] == "category"]
>>lista_category = list(variables_category['variable'])
# Generamos el perfilamiento para esas variables
>>profiling_de_categorias = genera_profiling_de_categorias(df,lista_category,vars_type)
"""
# Obtenemos los estadísticos de la columna si es catagorica
lista_perfilamiento_categorico = ['Tipo','Número de categorías', 'Número de observaciones',
'Observaciones nulas','% Observaciones nulas', 'Valores únicos',
'Moda1/veces/%','Moda2/veces/%','Moda3/veces/%']
datos_dataframe_profiling_categoricos = {'Métrica':lista_perfilamiento_categorico}
dataframe_profiling_categoricas = pd.DataFrame(data=datos_dataframe_profiling_categoricos)
for col in lista_category:
#Tipo de dato
vars_type_cat = pd.DataFrame(vars_type)
#vars_type_cat
df_tipo = pd.DataFrame(data=vars_type_cat.loc[vars_type_cat["variable"] == col])
tipo_dato=df_tipo['tipo'][0]
#Obtenemos las métricas relevantes
descr_col = df[col]
descr_col = pd.DataFrame(descr_col)
descr_col['metrica']=descr_col.index
descr_col.columns=['valor','Métrica']
#Numero de categorias
num_categorias=descr_col.nunique()["valor"]
#Numero de observaciones
num_observaciones=len(descr_col)
#Valores nulos
num_obs_nulas=df[col].isna().sum()
#%Valores nulos
por_obs_nulas=num_obs_nulas/num_observaciones
#Valor de las categorias
valores_unicos = list(df[col].unique())
#Generamos tabla para las modas
tabla_importantes = CreaTablaConteoPorcentaje(df,str(col),True)
tabla_importantes.columns = ['conteo','porcentaje']
moda1 = tabla_importantes.index[0]
veces1 = tabla_importantes['conteo'][0]
porcentaje1 = tabla_importantes['porcentaje'][0]
datos_moda1 = [moda1,veces1,porcentaje1]
moda2 = tabla_importantes.index[1]
veces2 = tabla_importantes['conteo'][1]
porcentaje2 = tabla_importantes['porcentaje'][1]
datos_moda2 = [moda2,veces2,porcentaje2]
moda3 = tabla_importantes.index[2]
veces3 = tabla_importantes['conteo'][2]
porcentaje3 = tabla_importantes['porcentaje'][2]
datos_moda3 = [moda3,veces3,porcentaje3]
datos_variable = [tipo_dato,num_categorias,num_observaciones,num_obs_nulas,por_obs_nulas,
valores_unicos,datos_moda1,datos_moda2,datos_moda3]
dataframe_profiling_categoricas[col]=datos_variable
return dataframe_profiling_categoricas
def genera_profiling_de_texto(df,lista_texto,vars_type):
"""
Función que genera un perfilamiento para los datos de tipo texto.
==========
* Args:
- df: el data frame al que se le va a realizar el perfilamiento para variables de texto.
- lista_texto: una lista con el nombre de las variables que son de tipo texto (object).
- vars_type: tabla generada por la función cuenta_tipo_de_dato de este mismo script.
* Return:
- Data Frame: Data Frame con el perfilamiento para las variables categóricas.
==========
Ejemplo:
>>vars_type = cuenta_tipo_de_dato(df)
# Extraemos el nombre de las variables de texto:
>>variables_texto = vars_type.loc[vars_type["tipo"] == "object"]
>>lista_texto = list(variables_texto['variable'])
# Generamos el perfilamiento para esas variables
>>profiling_de_texto = genera_profiling_de_texto(df,lista_texto,vars_type)
"""
# Obtenemos los estadísticos de la columna si es catagorica
lista_perfilamiento_txt = ['Tipo','Número de observaciones', 'Observaciones únicas', '% Observaciones únicas',
'Observaciones nulas', '% Observaciones nulas', 'Tamaño promedio','Tamaño mínimo','Tamaño máximo']
datos_dataframe_profiling_txt = {'Métrica':lista_perfilamiento_txt}
dataframe_profiling_txt = pd.DataFrame(data=datos_dataframe_profiling_txt)
for col in lista_texto:
#tipo de dato
vars_type_txt = pd.DataFrame(vars_type)
#vars_type_txt
df_tipo = pd.DataFrame(data=vars_type_txt.loc[vars_type_txt["variable"] == col])
tipo_dato=df_tipo['tipo'][0]
#Obtenemos las métricas relevantes
descr_col = df[col]
descr_col = pd.DataFrame(descr_col)
descr_col['Métrica']=descr_col.index
descr_col.columns=['valor','Métrica']
#Numero de observaciones
num_observaciones=len(descr_col)
#Observaciones unicas
num_obs_unicas=df[col].nunique()
#%Observaciones unicas
por_obs_unicas=num_obs_unicas/num_observaciones
#Valores nulos
num_obs_nulas=df[col].isna().sum()
#%Valores nulos
por_obs_nulas=num_obs_nulas/num_observaciones
#%Tamaño promedio
tam_prom=df[col].str.len().mean()
#tam_prom=agua[col].apply(len).mean()
#%Tamaño minimo
tam_min=df[col].str.len().min()
#tam_min=agua[col].apply(len).min()
#%Tamaño maximo
tam_max=df[col].str.len().max()
#tam_max=agua[col].apply(len).max()
datos_variable = [tipo_dato,num_observaciones,num_obs_unicas,por_obs_unicas,num_obs_nulas,por_obs_nulas,tam_prom,tam_min,tam_max]
dataframe_profiling_txt[col]=datos_variable
return dataframe_profiling_txt
def genera_profiling_de_fechas(df,lista_date,vars_type):
"""
"""
# Obtenemos los estadísticos de la columna si es fecha
lista_perfilamiento_fechas = ['Tipo','Número de observaciones', 'Mínimo','Máximo',
'Número de observaciones únicas','Número de faltantes','Top1/veces/%',
'Top2/veces/%','Top3/veces/%']
datos_dataframe_profiling_fechas = {'Métrica':lista_perfilamiento_fechas}
dataframe_profiling_fechas = pd.DataFrame(data = datos_dataframe_profiling_fechas)
for col in lista_date:
# tipo de dato
vars_type_fechas = pd.DataFrame(vars_type)
"""
국토교통부 Open API
molit(Ministry of Land, Infrastructure and Transport)
1. Transaction 클래스: 부동산 실거래가 조회
- AptTrade: 아파트매매 실거래자료 조회
- AptTradeDetail: 아파트매매 실거래 상세 자료 조회
- AptRent: 아파트 전월세 자료 조회
- AptOwnership: 아파트 분양권전매 신고 자료 조회
- OffiTrade: 오피스텔 매매 신고 조회
- OffiRent: 오피스텔 전월세 신고 조회
- RHTrade: 연립다세대 매매 실거래자료 조회
- RHRent: 연립다세대 전월세 실거래자료 조회
- DHTrade: 단독/다가구 매매 실거래 조회
- DHRent: 단독/다가구 전월세 자료 조회
- LandTrade: 토지 매매 신고 조회
- BizTrade: 상업업무용 부동산 매매 신고 자료 조회
2. Building 클래스: 건축물대장정보 서비스
01 건축물대장 기본개요 조회: getBrBasisOulnInfo
02 건축물대장 총괄표제부 조회: getBrRecapTitleInfo
03 건축물대장 표제부 조회: getBrTitleInfo
04 건축물대장 층별개요 조회: getBrFlrOulnInfo
05 건축물대장 부속지번 조회: getBrAtchJibunInfo
06 건축물대장 전유공용면적 조회: getBrExposPubuseAreaInfo
07 건축물대장 오수정화시설 조회: getBrWclfInfo
08 건축물대장 주택가격 조회: getBrHsprcInfo
09 건축물대장 전유부 조회: getBrExposInfo
10 건축물대장 지역지구구역 조회: getBrJijiguInfo
"""
import datetime
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
class Transaction:
"""
부동산 실거래가 조회 클래스
"""
def __init__(self, serviceKey):
"""
공공 데이터 포털에서 발급받은 Service Key를 입력받아 초기화합니다.
"""
# Open API 서비스 키 초기화
self.serviceKey = serviceKey
# ServiceKey 유효성 검사
self.urlAptTrade = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptTrade?serviceKey="
+ self.serviceKey)
self.urlAptTradeDetail = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptTradeDev?serviceKey="
+ self.serviceKey)
self.urlAptRent = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptRent?serviceKey="
+ self.serviceKey)
self.urlAptOwnership = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcSilvTrade?serviceKey="
+ self.serviceKey)
self.urlOffiTrade = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcOffiTrade?serviceKey="
+ self.serviceKey)
self.urlOffiRent = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcOffiRent?serviceKey="
+ self.serviceKey)
self.urlRHTrade = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcRHTrade?serviceKey="
+ self.serviceKey)
self.urlRHRent = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcRHRent?serviceKey="
+ self.serviceKey)
self.urlDHTrade = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcSHTrade?serviceKey="
+ self.serviceKey)
self.urlDHRent = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcSHRent?serviceKey="
+ self.serviceKey)
self.urlLandTrade = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcLandTrade?serviceKey="
+ self.serviceKey)
self.urlBizTrade = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcNrgTrade?serviceKey="
+ self.serviceKey)
# Open API URL Dict
urlDict = {
"아파트매매 실거래자료 조회": self.urlAptTrade,
"아파트매매 실거래 상세 자료 조회": self.urlAptTradeDetail,
"아파트 전월세 자료 조회": self.urlAptRent,
"아파트 분양권전매 신고 자료 조회": self.urlAptOwnership,
"오피스텔 매매 신고 조회": self.urlOffiTrade,
"오피스텔 전월세 신고 조회": self.urlOffiRent,
"연립다세대 매매 실거래자료 조회": self.urlRHTrade,
"연립다세대 전월세 실거래자료 조회": self.urlRHRent,
"단독/다가구 매매 실거래 조회": self.urlDHTrade,
"단독/다가구 전월세 자료 조회": self.urlDHRent,
"토지 매매 신고 조회": self.urlLandTrade,
"상업업무용 부동산 매매 신고 자료 조회": self.urlBizTrade,
}
# Check that each service responds normally
for serviceName, url in urlDict.items():
result = requests.get(url, verify=False)
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
te = xmlsoup.findAll("header")
if te[0].find("resultCode").text == "00":
print(f">>> {serviceName} 서비스가 정상 작동합니다.")
else:
print(f">>> {serviceName} 서비스키 미등록 오류입니다.")
# Initialize the region codes
# Legal-dong (법정동) code source: https://code.go.kr
path_code = "https://raw.githubusercontent.com/WooilJeong/PublicDataReader/f14e4de3410cc0f798a83ee5934070d651cbd67b/docs/%EB%B2%95%EC%A0%95%EB%8F%99%EC%BD%94%EB%93%9C%20%EC%A0%84%EC%B2%B4%EC%9E%90%EB%A3%8C.txt"
code = pd.read_csv(path_code, encoding="cp949", sep="\t")
code = code.loc[code["폐지여부"] == "존재"]
code["법정구코드"] = list(map(lambda a: str(a)[:5], list(code["법정동코드"])))
self.code = code
def CodeFinder(self, name):
"""
국토교통부 실거래가 정보 오픈API는 법정동코드 10자리 중 앞 5자리인 구를 나타내는 지역코드를 사용합니다.
API에 사용할 구 별 코드를 조회하는 메서드이며, 문자열 지역 명을 입력받고, 조회 결과를 Pandas DataFrame형식으로 출력합니다.
"""
result = self.code[self.code["법정동명"].str.contains(name)][[
"법정동명", "법정구코드"
]]
result.index = range(len(result))
return result
def DataCollector(self, service, LAWD_CD, start_date, end_date):
"""
서비스별 기간별 조회
입력: 서비스별 조회 메서드, 지역코드, 시작월(YYYYmm), 종료월(YYYYmm)
"""
start_date = datetime.datetime.strptime(str(start_date), "%Y%m")
start_date = datetime.datetime.strftime(start_date, "%Y-%m")
end_date = datetime.datetime.strptime(str(end_date), "%Y%m")
end_date = end_date + datetime.timedelta(days=31)
end_date = datetime.datetime.strftime(end_date, "%Y-%m")
ts = pd.date_range(start=start_date, end=end_date, freq="m")
date_list = list(ts.strftime("%Y%m"))
df = pd.DataFrame()
df_sum = pd.DataFrame()
for m in date_list:
print(">>> LAWD_CD :", LAWD_CD, "DEAL_YMD :", m)
DEAL_YMD = m
df = service(LAWD_CD, DEAL_YMD)
df_sum = pd.concat([df_sum, df])
df_sum.index = range(len(df_sum))
return df_sum
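# Hedged usage sketch (assumption): DataCollector iterates month by month and concatenates the
# per-month frames, e.g.
#
#   df = ts.DataCollector(ts.AptTrade, LAWD_CD="11110", start_date=202001, end_date=202003)
#
# would call AptTrade three times (202001, 202002, 202003) and return the combined DataFrame.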
def AptTrade(self, LAWD_CD, DEAL_YMD):
"""
01 아파트매매 실거래자료 조회
입력: 지역코드(법정동코드 5자리), 계약월(YYYYmm)
"""
# URL
url_1 = self.urlAptTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"법정동",
"지역코드",
"아파트",
"지번",
"년",
"월",
"일",
"건축년도",
"전용면적",
"층",
"거래금액",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[법정동, 지역코드, 아파트, 지번, 년, 월, 일, 건축년도, 전용면적, 층, 거래금액]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"지역코드", "법정동", "거래일", "아파트", "지번", "전용면적", "층", "건축년도", "거래금액"
]
# Feature Engineering
try:
if len(df["년"] != 0) & len(df["월"] != 0) & len(df["일"] != 0):
df["거래일"] = df["년"] + "-" + df["월"] + "-" + df["일"]
df["거래일"] = pd.to_datetime(df["거래일"])
df["거래금액"] = pd.to_numeric(df["거래금액"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("조회할 자료가 없습니다.")
# Arrange Columns
df = df[colNames]
df = df.sort_values(["법정동", "거래일"])
df["법정동"] = df["법정동"].str.strip()
df["아파트"] = df["아파트"].str.strip()
df.index = range(len(df))
# Type conversion
cols = df.columns.drop(["법정동", "거래일", "아파트", "지번"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# Error raised on a normal request -> Python code error
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Error on the Open API provider side
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def AptTradeDetail(self, LAWD_CD, DEAL_YMD):
"""
        02 Query detailed apartment sale (trade) transaction records
        Input: region code (first 5 digits of the legal dong code), contract month (YYYYmm)
"""
# URL
url_1 = self.urlAptTradeDetail + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"거래금액",
"건축년도",
"년",
"도로명",
"도로명건물본번호코드",
"도로명건물부번호코드",
"도로명시군구코드",
"도로명일련번호코드",
"도로명지상지하코드",
"도로명코드",
"법정동",
"법정동본번코드",
"법정동부번코드",
"법정동시군구코드",
"법정동읍면동코드",
"법정동지번코드",
"아파트",
"월",
"일",
"전용면적",
"지번",
"지역코드",
"층",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[
거래금액,
건축년도,
년,
도로명,
도로명건물본번호코드,
도로명건물부번호코드,
도로명시군구코드,
도로명일련번호코드,
도로명지상지하코드,
도로명코드,
법정동,
법정동본번코드,
법정동부번코드,
법정동시군구코드,
법정동읍면동코드,
법정동지번코드,
아파트,
월,
일,
전용면적,
지번,
지역코드,
층,
]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"지역코드",
"법정동",
"거래일",
"아파트",
"지번",
"전용면적",
"층",
"건축년도",
"거래금액",
"법정동본번코드",
"법정동부번코드",
"법정동시군구코드",
"법정동읍면동코드",
"법정동지번코드",
"도로명",
"도로명건물본번호코드",
"도로명건물부번호코드",
"도로명시군구코드",
"도로명일련번호코드",
"도로명지상지하코드",
"도로명코드",
]
# Feature Engineering
try:
if len(df["년"] != 0) & len(df["월"] != 0) & len(df["일"] != 0):
df["거래일"] = df["년"] + "-" + df["월"] + "-" + df["일"]
df["거래일"] = pd.to_datetime(df["거래일"])
df["거래금액"] = pd.to_numeric(df["거래금액"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("조회할 자료가 없습니다.")
            # Arrange columns
df = df[colNames]
df = df.sort_values(["법정동", "거래일"])
df["법정동"] = df["법정동"].str.strip()
df["아파트"] = df["아파트"].str.strip()
df.index = range(len(df))
            # Convert to numeric types
cols = df.columns.drop(["법정동", "거래일", "아파트", "지번", "도로명"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
            # The request itself succeeded (resultCode "00") but parsing failed -> a Python-side code error
            if te[0].find("resultCode").text == "00":
                print(">>> Python Logic Error. e-mail : <EMAIL>")
            # Otherwise, an error reported by the Open API service provider
            else:
                print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def AptRent(self, LAWD_CD, DEAL_YMD):
"""
        03 Query apartment jeonse / monthly-rent records
        Input: region code (first 5 digits of the legal dong code), contract month (YYYYmm)
"""
# URL
url_1 = self.urlAptRent + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"법정동",
"지역코드",
"아파트",
"지번",
"년",
"월",
"일",
"건축년도",
"전용면적",
"층",
"보증금액",
"월세금액",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[법정동, 지역코드, 아파트, 지번, 년, 월, 일, 건축년도, 전용면적, 층, 보증금액, 월세금액]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"지역코드",
"법정동",
"거래일",
"아파트",
"지번",
"전용면적",
"층",
"건축년도",
"보증금액",
"월세금액",
]
# Feature Engineering
try:
if len(df["년"] != 0) & len(df["월"] != 0) & len(df["일"] != 0):
df["거래일"] = df["년"] + "-" + df["월"] + "-" + df["일"]
df["거래일"] = pd.to_datetime(df["거래일"])
df["보증금액"] = pd.to_numeric(df["보증금액"].str.replace(",", ""))
df["월세금액"] = pd.to_numeric(df["월세금액"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("조회할 자료가 없습니다.")
            # Arrange columns
df = df[colNames]
df = df.sort_values(["법정동", "거래일"])
df["법정동"] = df["법정동"].str.strip()
df.index = range(len(df))
            # Convert to numeric types
cols = df.columns.drop(["법정동", "거래일", "지번", "아파트"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
            # The request itself succeeded (resultCode "00") but parsing failed -> a Python-side code error
            if te[0].find("resultCode").text == "00":
                print(">>> Python Logic Error. e-mail : <EMAIL>")
            # Otherwise, an error reported by the Open API service provider
            else:
                print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def AptOwnership(self, LAWD_CD, DEAL_YMD):
"""
        04 Query apartment pre-sale right (분양권) resale report records
        Input: region code (first 5 digits of the legal dong code), contract month (YYYYmm)
"""
# URL
url_1 = self.urlAptOwnership + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"법정동",
"지역코드",
"시군구",
"단지",
"지번",
"구분",
"년",
"월",
"일",
"전용면적",
"층",
"거래금액",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[법정동, 지역코드, 시군구, 단지, 지번, 구분, 년, 월, 일, 전용면적, 층, 거래금액]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"지역코드",
"법정동",
"거래일",
"시군구",
"단지",
"지번",
"구분",
"전용면적",
"층",
"거래금액",
]
# Feature Engineering
try:
if len(df["년"] != 0) & len(df["월"] != 0) & len(df["일"] != 0):
df["거래일"] = df["년"] + "-" + df["월"] + "-" + df["일"]
df["거래일"] = pd.to_datetime(df["거래일"])
df["거래금액"] = pd.to_numeric(df["거래금액"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("조회할 자료가 없습니다.")
            # Arrange columns
df = df[colNames]
df = df.sort_values(["법정동", "거래일"])
df["법정동"] = df["법정동"].str.strip()
df.index = range(len(df))
            # Convert to numeric types
cols = df.columns.drop(["법정동", "거래일", "시군구", "단지", "지번", "구분"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
            # The request itself succeeded (resultCode "00") but parsing failed -> a Python-side code error
            if te[0].find("resultCode").text == "00":
                print(">>> Python Logic Error. e-mail : <EMAIL>")
            # Otherwise, an error reported by the Open API service provider
            else:
                print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def OffiTrade(self, LAWD_CD, DEAL_YMD):
"""
        05 Query officetel sale (trade) report records
        Input: region code (first 5 digits of the legal dong code), contract month (YYYYmm)
"""
# URL
url_1 = self.urlOffiTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"법정동",
"지역코드",
"시군구",
"단지",
"지번",
"년",
"월",
"일",
"전용면적",
"층",
"거래금액",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[법정동, 지역코드, 시군구, 단지, 지번, 년, 월, 일, 전용면적, 층, 거래금액]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"지역코드", "법정동", "거래일", "시군구", "단지", "지번", "전용면적", "층", "거래금액"
]
# Feature Engineering
try:
if len(df["년"] != 0) & len(df["월"] != 0) & len(df["일"] != 0):
df["거래일"] = df["년"] + "-" + df["월"] + "-" + df["일"]
df["거래일"] = pd.to_datetime(df["거래일"])
df["거래금액"] = pd.to_numeric(df["거래금액"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("조회할 자료가 없습니다.")
            # Arrange columns
df = df[colNames]
df = df.sort_values(["법정동", "거래일"])
df["법정동"] = df["법정동"].str.strip()
df.index = range(len(df))
            # Convert to numeric types
cols = df.columns.drop(["법정동", "거래일", "시군구", "단지", "지번"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
            # The request itself succeeded (resultCode "00") but parsing failed -> a Python-side code error
            if te[0].find("resultCode").text == "00":
                print(">>> Python Logic Error. e-mail : <EMAIL>")
            # Otherwise, an error reported by the Open API service provider
            else:
                print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def OffiRent(self, LAWD_CD, DEAL_YMD):
"""
        06 Query officetel jeonse / monthly-rent report records
        Input: region code (first 5 digits of the legal dong code), contract month (YYYYmm)
"""
# URL
url_1 = self.urlOffiRent + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"법정동",
"지역코드",
"시군구",
"단지",
"지번",
"년",
"월",
"일",
"전용면적",
"층",
"보증금",
"월세",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[법정동, 지역코드, 시군구, 단지, 지번, 년, 월, 일, 전용면적, 층, 보증금, 월세]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"지역코드",
"법정동",
"거래일",
"시군구",
"단지",
"지번",
"전용면적",
"층",
"보증금",
"월세",
]
# Feature Engineering
try:
if len(df["년"] != 0) & len(df["월"] != 0) & len(df["일"] != 0):
df["거래일"] = df["년"] + "-" + df["월"] + "-" + df["일"]
df["거래일"] = pd.to_datetime(df["거래일"])
df["보증금"] = pd.to_numeric(df["보증금"].str.replace(",", ""))
df["월세"] = pd.to_numeric(df["월세"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("조회할 자료가 없습니다.")
            # Arrange columns
df = df[colNames]
df = df.sort_values(["법정동", "거래일"])
df["법정동"] = df["법정동"].str.strip()
df.index = range(len(df))
            # Convert to numeric types
cols = df.columns.drop(["법정동", "거래일", "시군구", "단지", "지번"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
            # The request itself succeeded (resultCode "00") but parsing failed -> a Python-side code error
            if te[0].find("resultCode").text == "00":
                print(">>> Python Logic Error. e-mail : <EMAIL>")
            # Otherwise, an error reported by the Open API service provider
            else:
                print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def RHTrade(self, LAWD_CD, DEAL_YMD):
"""
        07 Query row house / multi-family (연립다세대) sale transaction records
        Input: region code (first 5 digits of the legal dong code), contract month (YYYYmm)
"""
# URL
url_1 = self.urlRHTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"법정동",
"지역코드",
"연립다세대",
"지번",
"년",
"월",
"일",
"전용면적",
"건축년도",
"층",
"거래금액",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[법정동, 지역코드, 연립다세대, 지번, 년, 월, 일, 전용면적, 건축년도, 층, 거래금액]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"지역코드",
"법정동",
"거래일",
"연립다세대",
"지번",
"전용면적",
"건축년도",
"층",
"거래금액",
]
# Feature Engineering
try:
if len(df["년"] != 0) & len(df["월"] != 0) & len(df["일"] != 0):
df["거래일"] = df["년"] + "-" + df["월"] + "-" + df["일"]
df["거래일"] = pd.to_datetime(df["거래일"])
df["거래금액"] = pd.to_numeric(df["거래금액"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("조회할 자료가 없습니다.")
            # Arrange columns
df = df[colNames]
df = df.sort_values(["법정동", "거래일"])
df["법정동"] = df["법정동"].str.strip()
df.index = range(len(df))
            # Convert to numeric types
cols = df.columns.drop(["법정동", "거래일", "연립다세대", "지번"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
            # The request itself succeeded (resultCode "00") but parsing failed -> a Python-side code error
            if te[0].find("resultCode").text == "00":
                print(">>> Python Logic Error. e-mail : <EMAIL>")
            # Otherwise, an error reported by the Open API service provider
            else:
                print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def RHRent(self, LAWD_CD, DEAL_YMD):
"""
        08 Query row house / multi-family (연립다세대) jeonse / monthly-rent transaction records
        Input: region code (first 5 digits of the legal dong code), contract month (YYYYmm)
"""
# URL
url_1 = self.urlRHRent + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"법정동",
"지역코드",
"연립다세대",
"지번",
"년",
"월",
"일",
"전용면적",
"건축년도",
"층",
"보증금액",
"월세금액",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[
법정동, 지역코드, 연립다세대, 지번, 년, 월, 일, 전용면적, 건축년도, 층, 보증금액,
월세금액
]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"지역코드",
"법정동",
"거래일",
"연립다세대",
"지번",
"전용면적",
"건축년도",
"층",
"보증금액",
"월세금액",
]
# Feature Engineering
try:
if len(df["년"] != 0) & len(df["월"] != 0) & len(df["일"] != 0):
df["거래일"] = df["년"] + "-" + df["월"] + "-" + df["일"]
df["거래일"] = pd.to_datetime(df["거래일"])
df["보증금액"] = pd.to_numeric(df["보증금액"].str.replace(",", ""))
df["월세금액"] = pd.to_numeric(df["월세금액"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("조회할 자료가 없습니다.")
            # Arrange columns
df = df[colNames]
df = df.sort_values(["법정동", "거래일"])
df["법정동"] = df["법정동"].str.strip()
df.index = range(len(df))
            # Convert to numeric types
cols = df.columns.drop(["법정동", "거래일", "연립다세대", "지번"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
            # The request itself succeeded (resultCode "00") but parsing failed -> a Python-side code error
            if te[0].find("resultCode").text == "00":
                print(">>> Python Logic Error. e-mail : <EMAIL>")
            # Otherwise, an error reported by the Open API service provider
            else:
                print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def DHTrade(self, LAWD_CD, DEAL_YMD):
"""
        09 Query detached / multi-household (단독/다가구) sale transaction records
        Input: region code (first 5 digits of the legal dong code), contract month (YYYYmm)
"""
# URL
url_1 = self.urlDHTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"법정동",
"지역코드",
"주택유형",
"년",
"월",
"일",
"대지면적",
"연면적",
"건축년도",
"거래금액",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[법정동, 지역코드, 주택유형, 년, 월, 일, 대지면적, 연면적, 건축년도, 거래금액]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"지역코드", "법정동", "거래일", "주택유형", "대지면적", "연면적", "건축년도", "거래금액"
]
# Feature Engineering
try:
if len(df["년"] != 0) & len(df["월"] != 0) & len(df["일"] != 0):
df["거래일"] = df["년"] + "-" + df["월"] + "-" + df["일"]
df["거래일"] = pd.to_datetime(df["거래일"])
df["거래금액"] = pd.to_numeric(df["거래금액"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("조회할 자료가 없습니다.")
            # Arrange columns
df = df[colNames]
df = df.sort_values(["법정동", "거래일"])
df["법정동"] = df["법정동"].str.strip()
df.index = range(len(df))
            # Convert to numeric types
cols = df.columns.drop(["법정동", "거래일", "주택유형"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
            # The request itself succeeded (resultCode "00") but parsing failed -> a Python-side code error
            if te[0].find("resultCode").text == "00":
                print(">>> Python Logic Error. e-mail : <EMAIL>")
            # Otherwise, an error reported by the Open API service provider
            else:
                print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def DHRent(self, LAWD_CD, DEAL_YMD):
"""
        10 Query detached / multi-household (단독/다가구) jeonse / monthly-rent records
        Input: region code (first 5 digits of the legal dong code), contract month (YYYYmm)
"""
# URL
url_1 = self.urlDHRent + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
            df = pd.DataFrame()
import re
from datetime import datetime
import nose
import pytz
import platform
from time import sleep
import os
import logging
import numpy as np
from distutils.version import StrictVersion
from pandas import compat
from pandas import NaT
from pandas.compat import u, range
from pandas.core.frame import DataFrame
import pandas.io.gbq as gbq
import pandas.util.testing as tm
from pandas.compat.numpy import np_datetime64_compat
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
_IMPORTS = False
_GOOGLE_API_CLIENT_INSTALLED = False
_GOOGLE_API_CLIENT_VALID_VERSION = False
_HTTPLIB2_INSTALLED = False
_SETUPTOOLS_INSTALLED = False
def _skip_if_no_project_id():
if not _get_project_id():
raise nose.SkipTest(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
raise nose.SkipTest("Cannot run integration tests without a "
"private key json file path")
def _skip_if_no_private_key_contents():
if not _get_private_key_contents():
raise nose.SkipTest("Cannot run integration tests without a "
"private key json contents")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
else:
return PROJECT_ID
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
else:
return PRIVATE_KEY_JSON_PATH
def _get_private_key_contents():
if _in_travis_environment():
with open(os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])) as f:
return f.read()
else:
return PRIVATE_KEY_JSON_CONTENTS
def _test_imports():
global _GOOGLE_API_CLIENT_INSTALLED, _GOOGLE_API_CLIENT_VALID_VERSION, \
_HTTPLIB2_INSTALLED, _SETUPTOOLS_INSTALLED
try:
import pkg_resources
_SETUPTOOLS_INSTALLED = True
except ImportError:
_SETUPTOOLS_INSTALLED = False
if compat.PY3:
google_api_minimum_version = '1.4.1'
else:
google_api_minimum_version = '1.2.0'
if _SETUPTOOLS_INSTALLED:
try:
try:
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
except:
from apiclient.discovery import build # noqa
from apiclient.errors import HttpError # noqa
from oauth2client.client import OAuth2WebServerFlow # noqa
from oauth2client.client import AccessTokenRefreshError # noqa
from oauth2client.file import Storage # noqa
from oauth2client.tools import run_flow # noqa
_GOOGLE_API_CLIENT_INSTALLED = True
_GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution(
'google-api-python-client').version
if (StrictVersion(_GOOGLE_API_CLIENT_VERSION) >=
StrictVersion(google_api_minimum_version)):
_GOOGLE_API_CLIENT_VALID_VERSION = True
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
try:
import httplib2 # noqa
_HTTPLIB2_INSTALLED = True
except ImportError:
_HTTPLIB2_INSTALLED = False
if not _SETUPTOOLS_INSTALLED:
raise ImportError('Could not import pkg_resources (setuptools).')
if not _GOOGLE_API_CLIENT_INSTALLED:
raise ImportError('Could not import Google API Client.')
if not _GOOGLE_API_CLIENT_VALID_VERSION:
raise ImportError("pandas requires google-api-python-client >= {0} "
"for Google BigQuery support, "
"current version {1}"
.format(google_api_minimum_version,
_GOOGLE_API_CLIENT_VERSION))
if not _HTTPLIB2_INSTALLED:
raise ImportError(
"pandas requires httplib2 for Google BigQuery support")
# Bug fix for https://github.com/pandas-dev/pandas/issues/12572
# We need to know that a supported version of oauth2client is installed
# Test that either of the following is installed:
# - SignedJwtAssertionCredentials from oauth2client.client
# - ServiceAccountCredentials from oauth2client.service_account
# SignedJwtAssertionCredentials is available in oauthclient < 2.0.0
# ServiceAccountCredentials is available in oauthclient >= 2.0.0
oauth2client_v1 = True
oauth2client_v2 = True
try:
from oauth2client.client import SignedJwtAssertionCredentials # noqa
except ImportError:
oauth2client_v1 = False
try:
from oauth2client.service_account import ServiceAccountCredentials # noqa
except ImportError:
oauth2client_v2 = False
if not oauth2client_v1 and not oauth2client_v2:
raise ImportError("Missing oauth2client required for BigQuery "
"service account support")
def _setup_common():
try:
_test_imports()
except (ImportError, NotImplementedError) as import_exception:
raise nose.SkipTest(import_exception)
if _in_travis_environment():
logging.getLogger('oauth2client').setLevel(logging.ERROR)
logging.getLogger('apiclient').setLevel(logging.ERROR)
def _check_if_can_get_correct_default_credentials():
# Checks if "Application Default Credentials" can be fetched
# from the environment the tests are running in.
# See Issue #13577
import httplib2
try:
from googleapiclient.discovery import build
except ImportError:
from apiclient.discovery import build
try:
from oauth2client.client import GoogleCredentials
credentials = GoogleCredentials.get_application_default()
http = httplib2.Http()
http = credentials.authorize(http)
bigquery_service = build('bigquery', 'v2', http=http)
jobs = bigquery_service.jobs()
job_data = {'configuration': {'query': {'query': 'SELECT 1'}}}
jobs.insert(projectId=_get_project_id(), body=job_data).execute()
return True
except:
return False
def clean_gbq_environment(private_key=None):
dataset = gbq._Dataset(_get_project_id(), private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
def test_generate_bq_schema_deprecated():
# 11121 Deprecation of generate_bq_schema
with tm.assert_produces_warning(FutureWarning):
df = make_mixed_dataframe_v2(10)
gbq.generate_bq_schema(df)
class TestGBQConnectorIntegration(tm.TestCase):
def setUp(self):
_setup_common()
_skip_if_no_project_id()
self.sut = gbq.GbqConnector(_get_project_id(),
private_key=_get_private_key_path())
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None,
'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
bigquery_service = self.sut.get_service()
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
def test_get_application_default_credentials_does_not_throw_error(self):
if _check_if_can_get_correct_default_credentials():
raise nose.SkipTest("Can get default_credentials "
"from the environment!")
credentials = self.sut.get_application_default_credentials()
self.assertIsNone(credentials)
def test_get_application_default_credentials_returns_credentials(self):
if not _check_if_can_get_correct_default_credentials():
raise nose.SkipTest("Cannot get default_credentials "
"from the environment!")
from oauth2client.client import GoogleCredentials
credentials = self.sut.get_application_default_credentials()
self.assertTrue(isinstance(credentials, GoogleCredentials))
class TestGBQConnectorServiceAccountKeyPathIntegration(tm.TestCase):
def setUp(self):
_setup_common()
_skip_if_no_project_id()
_skip_if_no_private_key_path()
self.sut = gbq.GbqConnector(_get_project_id(),
private_key=_get_private_key_path())
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None,
'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
bigquery_service = self.sut.get_service()
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
class TestGBQConnectorServiceAccountKeyContentsIntegration(tm.TestCase):
def setUp(self):
_setup_common()
_skip_if_no_project_id()
_skip_if_no_private_key_path()
self.sut = gbq.GbqConnector(_get_project_id(),
private_key=_get_private_key_path())
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None,
'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
bigquery_service = self.sut.get_service()
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
class GBQUnitTests(tm.TestCase):
def setUp(self):
_setup_common()
def test_import_google_api_python_client(self):
if compat.PY2:
with tm.assertRaises(ImportError):
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
from apiclient.discovery import build # noqa
from apiclient.errors import HttpError # noqa
else:
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
def test_should_return_bigquery_integers_as_python_floats(self):
result = gbq._parse_entry(1, 'INTEGER')
tm.assert_equal(result, float(1))
def test_should_return_bigquery_floats_as_python_floats(self):
result = gbq._parse_entry(1, 'FLOAT')
tm.assert_equal(result, float(1))
def test_should_return_bigquery_timestamps_as_numpy_datetime(self):
result = gbq._parse_entry('0e9', 'TIMESTAMP')
tm.assert_equal(result, np_datetime64_compat('1970-01-01T00:00:00Z'))
def test_should_return_bigquery_booleans_as_python_booleans(self):
result = gbq._parse_entry('false', 'BOOLEAN')
tm.assert_equal(result, False)
def test_should_return_bigquery_strings_as_python_strings(self):
result = gbq._parse_entry('STRING', 'STRING')
tm.assert_equal(result, 'STRING')
def test_to_gbq_should_fail_if_invalid_table_name_passed(self):
with tm.assertRaises(gbq.NotFoundException):
gbq.to_gbq(DataFrame(), 'invalid_table_name', project_id="1234")
def test_to_gbq_with_no_project_id_given_should_fail(self):
with tm.assertRaises(TypeError):
gbq.to_gbq(DataFrame(), 'dataset.tablename')
def test_read_gbq_with_no_project_id_given_should_fail(self):
with tm.assertRaises(TypeError):
gbq.read_gbq('SELECT "1" as NUMBER_1')
def test_that_parse_data_works_properly(self):
test_schema = {'fields': [
{'mode': 'NULLABLE', 'name': 'VALID_STRING', 'type': 'STRING'}]}
test_page = [{'f': [{'v': 'PI'}]}]
test_output = gbq._parse_data(test_schema, test_page)
correct_output = DataFrame({'VALID_STRING': ['PI']})
tm.assert_frame_equal(test_output, correct_output)
def test_read_gbq_with_invalid_private_key_json_should_fail(self):
with tm.assertRaises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq('SELECT 1', project_id='x', private_key='y')
def test_read_gbq_with_empty_private_key_json_should_fail(self):
with tm.assertRaises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq('SELECT 1', project_id='x', private_key='{}')
def test_read_gbq_with_private_key_json_wrong_types_should_fail(self):
with tm.assertRaises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq(
'SELECT 1', project_id='x',
private_key='{ "client_email" : 1, "private_key" : True }')
def test_read_gbq_with_empty_private_key_file_should_fail(self):
with tm.ensure_clean() as empty_file_path:
with tm.assertRaises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq('SELECT 1', project_id='x',
private_key=empty_file_path)
def test_read_gbq_with_corrupted_private_key_json_should_fail(self):
_skip_if_no_private_key_path()
with tm.assertRaises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq(
'SELECT 1', project_id='x',
private_key=re.sub('[a-z]', '9', _get_private_key_path()))
class TestReadGBQIntegration(tm.TestCase):
@classmethod
def setUpClass(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_setup_common()
def setUp(self):
# - PER-TEST FIXTURES -
# put here any instruction you want to be run *BEFORE* *EVERY* test is
# executed.
pass
@classmethod
def tearDownClass(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
pass
def tearDown(self):
# - PER-TEST FIXTURES -
# put here any instructions you want to be run *AFTER* *EVERY* test is
# executed.
pass
def test_should_read_as_user_account(self):
if _in_travis_environment():
raise nose.SkipTest("Cannot run local auth in travis environment")
query = 'SELECT "PI" as VALID_STRING'
df = gbq.read_gbq(query, project_id=_get_project_id())
tm.assert_frame_equal(df, DataFrame({'VALID_STRING': ['PI']}))
def test_should_read_as_service_account_with_key_path(self):
_skip_if_no_private_key_path()
query = 'SELECT "PI" as VALID_STRING'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'VALID_STRING': ['PI']}))
def test_should_read_as_service_account_with_key_contents(self):
_skip_if_no_private_key_contents()
query = 'SELECT "PI" as VALID_STRING'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_contents())
tm.assert_frame_equal(df, DataFrame({'VALID_STRING': ['PI']}))
def test_should_properly_handle_valid_strings(self):
query = 'SELECT "PI" as VALID_STRING'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'VALID_STRING': ['PI']}))
def test_should_properly_handle_empty_strings(self):
query = 'SELECT "" as EMPTY_STRING'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'EMPTY_STRING': [""]}))
def test_should_properly_handle_null_strings(self):
query = 'SELECT STRING(NULL) as NULL_STRING'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'NULL_STRING': [None]}))
def test_should_properly_handle_valid_integers(self):
query = 'SELECT INTEGER(3) as VALID_INTEGER'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'VALID_INTEGER': [3]}))
def test_should_properly_handle_null_integers(self):
query = 'SELECT INTEGER(NULL) as NULL_INTEGER'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'NULL_INTEGER': [np.nan]}))
def test_should_properly_handle_valid_floats(self):
query = 'SELECT PI() as VALID_FLOAT'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame(
{'VALID_FLOAT': [3.141592653589793]}))
def test_should_properly_handle_null_floats(self):
query = 'SELECT FLOAT(NULL) as NULL_FLOAT'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'NULL_FLOAT': [np.nan]}))
def test_should_properly_handle_timestamp_unix_epoch(self):
query = 'SELECT TIMESTAMP("1970-01-01 00:00:00") as UNIX_EPOCH'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame(
{'UNIX_EPOCH': [np.datetime64('1970-01-01T00:00:00.000000Z')]}))
def test_should_properly_handle_arbitrary_timestamp(self):
query = 'SELECT TIMESTAMP("2004-09-15 05:00:00") as VALID_TIMESTAMP'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({
'VALID_TIMESTAMP': [np.datetime64('2004-09-15T05:00:00.000000Z')]
}))
def test_should_properly_handle_null_timestamp(self):
query = 'SELECT TIMESTAMP(NULL) as NULL_TIMESTAMP'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'NULL_TIMESTAMP': [NaT]}))
def test_should_properly_handle_true_boolean(self):
query = 'SELECT BOOLEAN(TRUE) as TRUE_BOOLEAN'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'TRUE_BOOLEAN': [True]}))
def test_should_properly_handle_false_boolean(self):
query = 'SELECT BOOLEAN(FALSE) as FALSE_BOOLEAN'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'FALSE_BOOLEAN': [False]}))
def test_should_properly_handle_null_boolean(self):
query = 'SELECT BOOLEAN(NULL) as NULL_BOOLEAN'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'NULL_BOOLEAN': [None]}))
def test_unicode_string_conversion_and_normalization(self):
correct_test_datatype = DataFrame(
{'UNICODE_STRING': [u("\xe9\xfc")]}
)
unicode_string = "\xc3\xa9\xc3\xbc"
if compat.PY3:
unicode_string = unicode_string.encode('latin-1').decode('utf8')
query = 'SELECT "{0}" as UNICODE_STRING'.format(unicode_string)
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, correct_test_datatype)
def test_index_column(self):
query = "SELECT 'a' as STRING_1, 'b' as STRING_2"
result_frame = gbq.read_gbq(query, project_id=_get_project_id(),
index_col="STRING_1",
private_key=_get_private_key_path())
correct_frame = DataFrame(
{'STRING_1': ['a'], 'STRING_2': ['b']}).set_index("STRING_1")
tm.assert_equal(result_frame.index.name, correct_frame.index.name)
def test_column_order(self):
query = "SELECT 'a' as STRING_1, 'b' as STRING_2, 'c' as STRING_3"
col_order = ['STRING_3', 'STRING_1', 'STRING_2']
result_frame = gbq.read_gbq(query, project_id=_get_project_id(),
col_order=col_order,
private_key=_get_private_key_path())
correct_frame = DataFrame({'STRING_1': ['a'], 'STRING_2': [
'b'], 'STRING_3': ['c']})[col_order]
| tm.assert_frame_equal(result_frame, correct_frame) | pandas.util.testing.assert_frame_equal |
# Implements a quick-and-dirty genetic algorithm to search hyperparameters.
# A more general design would be an object-oriented re-implementation: each hyperparameter becomes its
# own class with methods describing how it is randomly generated and how it varies (mutates), plus an
# overall hyperparameter-set class holding a dictionary of those objects. A hedged sketch of that idea
# is included after the global bounds below.
# Results are collected in a pandas DataFrame.
import pandas as pd
import numpy as np
from numpy import random
import tensorflow as tf
import deepchem as dc
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_auc_score
from math import ceil, log
# set global variables for min/max for each parameter
N_HIDDEN = [10, 80]
N_LAYERS = [1, 7]
LEARNING_RATE = [-3, 0]
LEARNING_RATE_TYPE = 'log_uniform'
DROPOUT_PROB = [0.2, 0.8]
N_EPOCHS = [10, 80]
BATCH_SIZE = [8, 1024]
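
# Hedged sketch of the object-oriented redesign described in the header comment above. The names below
# (HyperParam, HyperParamSet, sample, mutate, sample_config) are assumptions for illustration only; the
# rest of this script keeps its original tuple/array-based approach and never uses these classes.
class HyperParam:
    """A single hyperparameter: bounds, a random sampler, and a simple mutation rule."""

    def __init__(self, name, low, high, integer=False, log_scale=False):
        self.name = name
        self.low = low
        self.high = high
        self.integer = integer
        self.log_scale = log_scale

    def sample(self):
        # Draw uniformly between the bounds; LEARNING_RATE-style parameters sample on a log10 scale.
        val = self.low + (self.high - self.low) * random.rand()
        if self.log_scale:
            val = 10 ** val
        return int(round(val)) if self.integer else val

    def mutate(self, value, scale=0.1):
        # Perturb an existing value by a fraction of the parameter range, clipping back into bounds.
        if self.log_scale:
            value = np.log10(value)
        new_val = float(np.clip(value + random.randn() * scale * (self.high - self.low),
                                self.low, self.high))
        if self.log_scale:
            new_val = 10 ** new_val
        return int(round(new_val)) if self.integer else new_val


class HyperParamSet:
    """Holds named HyperParam objects and samples full configurations as dictionaries."""

    def __init__(self, params):
        self.params = {p.name: p for p in params}

    def sample_config(self):
        return {name: p.sample() for name, p in self.params.items()}

# Example configuration mirroring the global bounds above, e.g.:
#   search_space = HyperParamSet([HyperParam("n_hidden", *N_HIDDEN, integer=True),
#                                 HyperParam("learning_rate", *LEARNING_RATE, log_scale=True)])
#   search_space.sample_config()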
def import_dc_data():
"""import the deepchem data, delete additional task labels, export train/validation/test sets"""
_, (train, valid, test), _ = dc.molnet.load_tox21()
train_X, train_y, train_w = train.X, train.y, train.w
valid_X, valid_y, valid_w = valid.X, valid.y, valid.w
test_X, test_y, test_w = test.X, test.y, test.w
# Remove extra tasks
train_y = train_y[:, 0]
valid_y = valid_y[:, 0]
test_y = test_y[:, 0]
train_w = train_w[:, 0]
valid_w = valid_w[:, 0]
test_w = test_w[:, 0]
# return the data as a dictionary
dc_data = {'train_X': train_X, 'valid_X': valid_X, 'test_X': test_X,
'train_y': train_y, 'valid_y': valid_y, 'test_y': test_y,
'train_w': train_w, 'valid_w': valid_w, 'test_w': test_w}
return dc_data
def eval_tox21_hyperparams(dc_data, n_hidden=50, n_layers=1, learning_rate=.001,
dropout_prob=0.5, n_epochs=45, batch_size=100,
weight_positives=True):
d = 1024
graph = tf.Graph()
with graph.as_default():
# Generate tensorflow graph
with tf.name_scope("placeholders"):
x = tf.placeholder(tf.float32, (None, d))
y = tf.placeholder(tf.float32, (None,))
w = tf.placeholder(tf.float32, (None,))
keep_prob = tf.placeholder(tf.float32)
for layer in range(n_layers):
with tf.name_scope("layer-%d" % layer):
W = tf.Variable(tf.random_normal((d, n_hidden)))
b = tf.Variable(tf.random_normal((n_hidden,)))
x_hidden = tf.nn.relu(tf.matmul(x, W) + b)
# Apply dropout
x_hidden = tf.nn.dropout(x_hidden, keep_prob)
with tf.name_scope("output"):
W = tf.Variable(tf.random_normal((n_hidden, 1)))
b = tf.Variable(tf.random_normal((1,)))
y_logit = tf.matmul(x_hidden, W) + b
# the sigmoid gives the class probability of 1
y_one_prob = tf.sigmoid(y_logit)
# Rounding P(y=1) will give the correct prediction.
y_pred = tf.round(y_one_prob)
with tf.name_scope("loss"):
# Compute the cross-entropy term for each datapoint
y_expand = tf.expand_dims(y, 1)
entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=y_logit, labels=y_expand)
# Multiply by weights
if weight_positives:
w_expand = tf.expand_dims(w, 1)
entropy = w_expand * entropy
# Sum all contributions
l = tf.reduce_sum(entropy)
with tf.name_scope("optim"):
train_op = tf.train.AdamOptimizer(learning_rate).minimize(l)
with tf.name_scope("summaries"):
tf.summary.scalar("loss", l)
merged = tf.summary.merge_all()
# For tensorboard visualization
# hyperparam_str = "d-%d-hidden-%d-lr-%f-n_epochs-%d-batch_size-%d-weight_pos-%s" % (
# d, n_hidden, learning_rate, n_epochs, batch_size, str(weight_positives))
# train_writer = tf.summary.FileWriter('/tmp/fcnet-func-' + hyperparam_str,
# tf.get_default_graph())
N = dc_data['train_X'].shape[0]
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
step = 0
for epoch in range(n_epochs):
pos = 0
while pos < N:
batch_X = dc_data['train_X'][pos:pos+batch_size]
batch_y = dc_data['train_y'][pos:pos+batch_size]
batch_w = dc_data['train_w'][pos:pos+batch_size]
feed_dict = {x: batch_X, y: batch_y, w: batch_w, keep_prob: dropout_prob}
_, summary, loss = sess.run([train_op, merged, l], feed_dict=feed_dict)
# print("epoch %d, step %d, loss: %f" % (epoch, step, loss))
# train_writer.add_summary(summary, step)
step += 1
pos += batch_size
# Make Predictions (set keep_prob to 1.0 for predictions)
valid_y_pred = sess.run(y_pred, feed_dict={x: dc_data['valid_X'], keep_prob: 1.0})
valid_y = dc_data['valid_y'] # get labels
acc = accuracy_score(valid_y, valid_y_pred, sample_weight=dc_data['valid_w'])
prec = precision_score(valid_y, valid_y_pred) # can't weight?
recall = recall_score(valid_y, valid_y_pred) # can't weight?
roc_auc = roc_auc_score(valid_y, valid_y_pred)
return (acc, prec, recall, roc_auc)
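# Illustrative call (assumption, not part of the original script): evaluating one hyperparameter
# setting on the data dictionary returned by import_dc_data(), e.g.
#
#     dc_data = import_dc_data()
#     acc, prec, rec, auc = eval_tox21_hyperparams(dc_data, n_hidden=64, n_layers=2,
#                                                  learning_rate=1e-3, dropout_prob=0.5,
#                                                  n_epochs=20, batch_size=128)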
# build a dataframe for model hyperparameters and evaluation metrics
def eval_log(name=None, n_hidden=50, n_layers=1, learning_rate=.001,
dropout_prob=0.5, n_epochs=45, batch_size=100,
weight_positives=True):
""" Evaluates the model on the hyperparameters supplied as arguments,
returns the results as a pd.Series """
# run the model
(acc, prec, rec, auc) = eval_tox21_hyperparams(dc_data, n_hidden=n_hidden, n_layers=n_layers,
learning_rate=learning_rate,
dropout_prob=dropout_prob, n_epochs=n_epochs,
batch_size=batch_size, weight_positives=weight_positives)
# create a dict
hparams = {'n_hidden': n_hidden,
'n_layers': n_layers,
'learning_rate': learning_rate,
'dropout_prob': dropout_prob,
'batch_size': batch_size,
'weight_positives': weight_positives,
'accuracy_score': acc,
'precision_score': prec,
'recall_score': rec,
'auc': auc}
return pd.Series(hparams, name=name, index=hparams.keys())
def get_random_hparams(n, n_hidden=N_HIDDEN, n_layers=N_LAYERS, learning_rate=LEARNING_RATE,
learning_rate_type=LEARNING_RATE_TYPE,
dropout_prob=DROPOUT_PROB, n_epochs=N_EPOCHS, batch_size=BATCH_SIZE):
""" creates n sets of hyperparameters randomly. default arguments represent random bounds. weight_positives is
probability of True"""
arr_n_hidden = random.randint(n_hidden[0], n_hidden[1], size=n, dtype=int)
arr_n_layers = random.randint(n_layers[0], n_layers[1], size=n, dtype=int)
rand_lr = min(learning_rate) + ((learning_rate[1] - learning_rate[0]) * random.rand(n))
if learning_rate_type == 'log_uniform':
arr_learning_rate = np.power(10, rand_lr)
else:
arr_learning_rate = rand_lr
arr_dropout_prob = min(dropout_prob) + ((dropout_prob[1] - dropout_prob[0]) * random.rand(n))
arr_n_epochs = random.randint(n_epochs[0], n_epochs[1], size=n, dtype=int)
arr_batch_size = random.randint(batch_size[0], batch_size[1], size=n, dtype=int)
arr_weight_positives = random.choice([True, False], size=n)
return (arr_n_hidden, arr_n_layers, arr_learning_rate, arr_dropout_prob,
arr_n_epochs, arr_batch_size, arr_weight_positives)
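# Illustrative note (assumption, not in the original file): get_random_hparams(20) returns a 7-tuple of
# length-20 arrays, one per hyperparameter, in the order expected by run_n_models() defined below, e.g.
#
#     hparams = get_random_hparams(20)
#     acc, prec, rec, auc = run_n_models(dc_data, hparams)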
def run_n_models(dc_data, hparams_tuple):
""" takes a dictionary of hyperparameters (each is an array) and runs the model on all the params in the array.
returns a dictionary of arrays with output metrics """
(arr_n_hidden, arr_n_layers, arr_learning_rate,
arr_dropout_prob, arr_n_epochs, arr_batch_size, arr_weight_positives) = hparams_tuple
# create empty arrays for output metrics
n = len(arr_n_hidden)
acc = np.zeros(n)
prec = np.zeros(n)
rec = np.zeros(n)
auc = np.zeros(n)
# use a dirty for loop for now
for i in range(n):
(acc[i], prec[i], rec[i], auc[i]) = eval_tox21_hyperparams(dc_data, n_hidden=arr_n_hidden[i],
n_layers=arr_n_layers[i],
learning_rate=arr_learning_rate[i],
dropout_prob=arr_dropout_prob[i],
n_epochs=arr_n_epochs[i],
batch_size=arr_batch_size[i],
weight_positives=arr_weight_positives[i])
# return tuple of arrays
return (acc, prec, rec, auc)
def eval_n_models(dc_data, n=5, generation=None, init="random", hparams=None):
"""evaluates n different models. Generates hyperparameters randomly if not specified."""
if init == 'hparams':
params = hparams
# default to init='random'
else:
params = get_random_hparams(n)
(acc, prec, rec, auc) = run_n_models(dc_data, params)
# if epoch is specified, write it as a column
dict = {'generation': pd.Series(np.full(n, generation)),
'n_hidden': pd.Series(params[0]),
'n_layers': pd.Series(params[1]),
'learning_rate': pd.Series(params[2]),
'dropout_prob': pd.Series(params[3]),
'n_epochs': pd.Series(params[4]),
'batch_size': pd.Series(params[5]),
'weight_positives': pd.Series(params[6]),
'acc': pd.Series(acc),
            'prec': pd.Series(prec),
'''
This library preprocesses raw images (resizing, denoising) for supervised learning.
It also handles automated image-quality filtering so that low-quality images can be excluded from the
supervised-learning set.
The input to the library is the relative path of the raw image folder and of the resized image folder.
Ref: MSCN values are calculated based on https://www.learnopencv.com/image-quality-assessment-brisque/
'''
#import all libraries
import os, glob
import pandas as pd
import numpy as np
import cv2, csv, json, math, time, argparse
from pathlib import Path
from PIL import Image, ImageOps, ImageFilter
from scipy import ndimage
import imageio
from pylab import *
# Pandas column names for storing resized and preprocessed images
logo_folder_columns = ['company_brand','folder_mean_val','folder_std_val','folder_mscn_val']
# Pandas column names for storing statistical parameters for image quality filtering
processed_image_columns =['true_logo','logo_img_name','original_image_name','company_brand','mean_mean_val','mean_std_val','mscn_val','img_path']
# Sampling images for balancing the images from each class
SAMPLE_SIZE = 200
EXPECTED_DIMENSION=120
SIZE_THRESHOLD=17
#Image processing for resizing images
def fix_aspect_ratio(img):
original_max_dim = max(float(img.size[0]),float(img.size[1]))
original_min_dim = min(float(img.size[0]),float(img.size[1]))
wpercent = (EXPECTED_DIMENSION/float(original_max_dim))
hsize = int((original_min_dim*float(wpercent)))
new_im = img.resize((EXPECTED_DIMENSION,hsize), Image.ANTIALIAS)
return new_im
# Make square images
def make_square(img, min_size=120, fill_color=(0, 0, 0, 0)):
x, y = img.size
size = max(min_size, x, y)
new_im = Image.new('RGBA', (size, size), fill_color)
new_im.paste(img, (int((size - x) / 2), int((size - y) / 2)))
return new_im
# Sharpen the edges
def sharpen_filter(img):
sharp_im = img.filter(ImageFilter.SHARPEN)
return sharp_im
# Statistical properties of each image
def calculate_MSCN_val(img):
C=3.0/255.0
blurred_img=cv2.GaussianBlur(img, (7, 7), 1.166)
blurred_sq = blurred_img * blurred_img
sigma = cv2.GaussianBlur(img * img, (7, 7), 1.166)
sigma = (sigma - blurred_sq) ** 0.5
sigma = sigma + C
MCSN_value = (img - blurred_img)/sigma
return MCSN_value
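# Illustrative usage (assumption, not part of the original library): MSCN coefficients are computed on
# a grayscale float image, e.g.
#
#     gray = cv2.imread("logo.png", cv2.IMREAD_GRAYSCALE).astype(np.float64)
#     mscn = calculate_MSCN_val(gray)
#     mscn.mean(), mscn.std()   # summary statistics later used for quality filtering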
# Get per brand logo image statistics
def get_folder_stats(df_logos_folder_label, df_folder_details,dir_path):
df_logos_folder = pd.DataFrame(columns=logo_folder_columns)
folders = ([name for name in sorted(os.listdir(dir_path), key=str.casefold)]) # get all directories
for company_name in folders:
company_name=company_name[4:len(company_name)] # Remove Mod- from the name
df_rows=df_folder_details.loc[df_folder_details['company_brand'] == company_name]
mean_mean_val=df_rows["mean_mean_val"].mean()
mean_std_val=df_rows["mean_std_val"].mean()
mean_mscn_val=df_rows["mscn_val"].mean()
row = pd.Series({logo_folder_columns[0] :company_name,
logo_folder_columns[1]: mean_mean_val,
logo_folder_columns[2]: mean_std_val,
logo_folder_columns[3]: mean_mscn_val,})
df_logos_folder = df_logos_folder.append(row,ignore_index=True)
return df_logos_folder
# Load dataset of logos in a dataframe
def get_file_excluded_1(folder_path_global, company_name, dir_litw_resized, folder_logo_num, threshold_mean, threshold_std, threshold_mscn):
    df_logos_pickle = pd.DataFrame(columns=processed_image_columns)
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = tm.makeCustomDataframe(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_df(df, cols=None):
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
tm.assert_series_equal(obj_df, obj_rs)
else:
tm.assert_frame_equal(obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
tm.assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
_check_df(df, None)
# dupe cols with selection
cols = ["b", "a"]
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range("2000", freq="5min", periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
# s3=make_dtnjat_arr(chunksize+5,0)
with tm.ensure_clean("1.csv") as pth:
df = DataFrame({"a": s1, "b": s2})
df.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth).apply(to_datetime)
tm.assert_frame_equal(df, recons, check_names=False)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(
df, r_dtype=None, c_dtype=None, rnlvl=None, cnlvl=None, dupe_col=False
):
kwargs = {"parse_dates": False}
if cnlvl:
if rnlvl is not None:
kwargs["index_col"] = list(range(rnlvl))
kwargs["header"] = list(range(cnlvl))
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs["header"] = 0
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not isinstance(x, str):
return x.decode("utf8")
return x
if dupe_col:
                # read_csv disambiguates the columns by
                # labeling them dupe.1, dupe.2, etc. Monkey-patch the columns back.
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1 :]
type_map = {"i": "i", "f": "f", "s": "O", "u": "O", "dt": "O", "p": "O"}
if r_dtype:
if r_dtype == "u": # unicode
r_dtype = "O"
recons.index = np.array(
[_to_uni(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[_to_uni(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "dt": # unicode
r_dtype = "O"
recons.index = np.array(
[Timestamp(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[Timestamp(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "p":
r_dtype = "O"
idx_list = to_datetime(recons.index)
recons.index = np.array(
[Timestamp(label) for label in idx_list], dtype=r_dtype
)
df.index = np.array(
list(map(Timestamp, df.index.to_timestamp())), dtype=r_dtype
)
else:
r_dtype = type_map.get(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
df.index = np.array(df.index, dtype=r_dtype)
if c_dtype:
if c_dtype == "u":
c_dtype = "O"
recons.columns = np.array(
[_to_uni(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[_to_uni(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "dt":
c_dtype = "O"
recons.columns = np.array(
[Timestamp(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[Timestamp(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "p":
c_dtype = "O"
col_list = to_datetime(recons.columns)
recons.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
col_list = df.columns.to_timestamp()
df.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
else:
c_dtype = type_map.get(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
df.columns = np.array(df.columns, dtype=c_dtype)
tm.assert_frame_equal(df, recons, check_names=False)
N = 100
chunksize = 1000
ncols = 4
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(nrows, ncols, r_idx_type="dt", c_idx_type="s"),
"dt",
"s",
)
for r_idx_type, c_idx_type in [("i", "i"), ("s", "s"), ("u", "dt"), ("p", "p")]:
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_type=r_idx_type, c_idx_type=c_idx_type
),
r_idx_type,
c_idx_type,
)
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
df = tm.makeCustomDataframe(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(df.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
df.index = ix
df.columns = cols
_do_test(df, dupe_col=True)
_do_test(DataFrame(index=np.arange(10)))
_do_test(
tm.makeCustomDataframe(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2
)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(tm.makeCustomDataframe(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2
),
rnlvl=2,
cnlvl=2,
)
def test_to_csv_from_csv_w_some_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["G"] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < 0.5]
float_frame["H"] = float_frame.index.map(f)
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_from_csv_w_all_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["E"] = np.inf
float_frame["F"] = -np.inf
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
with tm.ensure_clean("__tmp_to_csv_no_index__") as path:
df = DataFrame({"c1": [1, 2, 3], "c2": [4, 5, 6]})
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
df["c3"] = Series([7, 8, 9], dtype="int64")
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
# names passed as columns parameter in to_csv
df = DataFrame({0: ["a", "b", "c"], 1: ["aa", "bb", "cc"]})
df["test"] = "txt"
assert df.to_csv() == df.to_csv(columns=[0, 1, "test"])
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
from_df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
to_df = DataFrame([[1, 2], [3, 4]], columns=["X", "Y"])
with tm.ensure_clean("__tmp_to_csv_headers__") as path:
from_df.to_csv(path, header=["X", "Y"])
recons = self.read_csv(path)
tm.assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=["X", "Y"])
recons = self.read_csv(path)
return_value = recons.reset_index(inplace=True)
assert return_value is None
tm.assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self, float_frame, datetime_frame):
frame = float_frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
frame.index = new_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
frame.to_csv(path, header=False)
frame.to_csv(path, columns=["A", "B"])
# round trip
frame.to_csv(path)
df = self.read_csv(path, index_col=[0, 1], parse_dates=False)
# TODO to_csv drops column name
tm.assert_frame_equal(frame, df, check_names=False)
assert frame.index.names == df.index.names
# needed if setUp becomes a class method
float_frame.index = old_index
# try multiindex with dates
tsframe = datetime_frame
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=["time", "foo"])
recons = self.read_csv(path, index_col=[0, 1])
# TODO to_csv drops column name
tm.assert_frame_equal(tsframe, recons, check_names=False)
# do not load index
tsframe.to_csv(path)
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(tsframe.columns) + 2
# no index
tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(recons.values, datetime_frame.values)
# needed if setUp becomes class method
datetime_frame.index = old_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ["first", "second"]
return DataFrame(
np.random.randint(0, 10, size=(3, 3)),
columns=MultiIndex.from_tuples(
[("bah", "foo"), ("bah", "bar"), ("ban", "baz")], names=names
),
dtype="int64",
)
# column & index are multi-index
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1])
tm.assert_frame_equal(df, result)
# column is mi
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=1, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=0)
tm.assert_frame_equal(df, result)
# dup column names?
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=3, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1, 2])
tm.assert_frame_equal(df, result)
# writing with no index
df = _make_frame()
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
tm.assert_frame_equal(df, result)
# we lose the names here
df = _make_frame(True)
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
assert com.all_none(*result.columns.names)
result.columns.names = df.columns.names
tm.assert_frame_equal(df, result)
# whatsnew example
df = _make_frame()
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
df = _make_frame(True)
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
# invalid options
df = _make_frame(True)
df.to_csv(path)
for i in [6, 7]:
msg = f"len of {i}, but only 5 lines in file"
with pytest.raises(ParserError, match=msg):
read_csv(path, header=list(range(i)), index_col=0)
# write with cols
msg = "cannot specify cols with a MultiIndex"
with pytest.raises(TypeError, match=msg):
df.to_csv(path, columns=["foo", "bar"])
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# empty
tsframe[:0].to_csv(path)
recons = self.read_csv(path)
exp = tsframe[:0]
exp.index = []
tm.assert_index_equal(recons.columns, exp.columns)
assert len(recons) == 0
def test_to_csv_interval_index(self):
# GH 28210
df = DataFrame({"A": list("abc"), "B": range(3)}, index=pd.interval_range(0, 3))
with tm.ensure_clean("__tmp_to_csv_interval_index__.csv") as path:
df.to_csv(path)
result = self.read_csv(path, index_col=0)
# can't roundtrip intervalindex via read_csv so check string repr (GH 23595)
expected = df.copy()
expected.index = expected.index.astype(str)
tm.assert_frame_equal(result, expected)
def test_to_csv_float32_nanrep(self):
df = DataFrame(np.random.randn(1, 4).astype(np.float32))
df[1] = np.nan
with tm.ensure_clean("__tmp_to_csv_float32_nanrep__.csv") as path:
df.to_csv(path, na_rep=999)
with open(path) as f:
lines = f.readlines()
assert lines[1].split(",")[2] == "999"
def test_to_csv_withcommas(self):
# Commas inside fields should be correctly escaped when saving as CSV.
df = DataFrame({"A": [1, 2, 3], "B": ["5,6", "7,8", "9,0"]})
with tm.ensure_clean("__tmp_to_csv_withcommas__.csv") as path:
df.to_csv(path)
df2 = self.read_csv(path)
tm.assert_frame_equal(df2, df)
def test_to_csv_mixed(self):
def create_cols(name):
return [f"{name}{i:03d}" for i in range(5)]
df_float = DataFrame(
np.random.randn(100, 5), dtype="float64", columns=create_cols("float")
)
df_int = DataFrame(
np.random.randn(100, 5).astype("int64"),
dtype="int64",
columns=create_cols("int"),
)
df_bool = DataFrame(True, index=df_float.index, columns=create_cols("bool"))
df_object = DataFrame(
"foo", index=df_float.index, columns=create_cols("object")
)
df_dt = DataFrame(
Timestamp("20010101"), index=df_float.index, columns=create_cols("date")
)
# add in some nans
df_float.iloc[30:50, 1:3] = np.nan
# ## this is a bug in read_csv right now ####
# df_dt.loc[30:50,1:3] = np.nan
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
# dtype
dtypes = {}
for n, dtype in [
("float", np.float64),
("int", np.int64),
("bool", np.bool_),
("object", object),
]:
for c in create_cols(n):
dtypes[c] = dtype
with tm.ensure_clean() as filename:
df.to_csv(filename)
rs = read_csv(
filename, index_col=0, dtype=dtypes, parse_dates=create_cols("date")
)
tm.assert_frame_equal(rs, df)
def test_to_csv_dups_cols(self):
df = DataFrame(
np.random.randn(1000, 30),
columns=list(range(15)) + list(range(15)),
dtype="float64",
)
with tm.ensure_clean() as filename:
df.to_csv(filename) # single dtype, fine
result = read_csv(filename, index_col=0)
result.columns = df.columns
tm.assert_frame_equal(result, df)
df_float = DataFrame(np.random.randn(1000, 3), dtype="float64")
df_int = DataFrame(np.random.randn(1000, 3)).astype("int64")
df_bool = DataFrame(True, index=df_float.index, columns=range(3))
df_object = DataFrame("foo", index=df_float.index, columns=range(3))
df_dt = DataFrame(Timestamp("20010101"), index=df_float.index, columns=range(3))
df = pd.concat(
[df_float, df_int, df_bool, df_object, df_dt], axis=1, ignore_index=True
)
df.columns = [0, 1, 2] * 5
with tm.ensure_clean() as filename:
df.to_csv(filename)
result = read_csv(filename, index_col=0)
# date cols
for i in ["0.4", "1.4", "2.4"]:
result[i] = to_datetime(result[i])
result.columns = df.columns
tm.assert_frame_equal(result, df)
# GH3457
N = 10
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
with tm.ensure_clean() as filename:
df.to_csv(filename)
# read_csv will rename the dups columns
result = read_csv(filename, index_col=0)
result = result.rename(columns={"a.1": "a"})
tm.assert_frame_equal(result, df)
def test_to_csv_chunking(self):
aa = DataFrame({"A": range(100000)})
aa["B"] = aa.A + 1.0
aa["C"] = aa.A + 2.0
aa["D"] = aa.A + 3.0
for chunksize in [10000, 50000, 100000]:
with tm.ensure_clean() as filename:
aa.to_csv(filename, chunksize=chunksize)
rs = read_csv(filename, index_col=0)
tm.assert_frame_equal(rs, aa)
@pytest.mark.slow
def test_to_csv_wide_frame_formatting(self):
# Issue #8621
df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
with tm.ensure_clean() as filename:
df.to_csv(filename, header=False, index=False)
rs = read_csv(filename, header=None)
tm.assert_frame_equal(rs, df)
def test_to_csv_bug(self):
f1 = StringIO("a,1.0\nb,2.0")
df = self.read_csv(f1, header=None)
newdf = DataFrame({"t": df[df.columns[0]]})
with tm.ensure_clean() as path:
newdf.to_csv(path)
recons = read_csv(path, index_col=0)
# don't check_names as t != 1
tm.assert_frame_equal(recons, newdf, check_names=False)
def test_to_csv_unicode(self):
df = DataFrame({"c/\u03c3": [1, 2, 3]})
with tm.ensure_clean() as path:
df.to_csv(path, encoding="UTF-8")
df2 = read_csv(path, index_col=0, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
df.to_csv(path, encoding="UTF-8", index=False)
df2 = read_csv(path, index_col=None, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
def test_to_csv_unicode_index_col(self):
buf = StringIO("")
df = DataFrame(
[["\u05d0", "d2", "d3", "d4"], ["a1", "a2", "a3", "a4"]],
columns=["\u05d0", "\u05d1", "\u05d2", "\u05d3"],
index=["\u05d0", "\u05d1"],
)
df.to_csv(buf, encoding="UTF-8")
buf.seek(0)
df2 = read_csv(buf, index_col=0, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
def test_to_csv_stringio(self, float_frame):
buf = StringIO()
float_frame.to_csv(buf)
buf.seek(0)
recons = read_csv(buf, index_col=0)
tm.assert_frame_equal(recons, float_frame)
def test_to_csv_float_format(self):
df = DataFrame(
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
with tm.ensure_clean() as filename:
df.to_csv(filename, float_format="%.2f")
rs = read_csv(filename, index_col=0)
xp = DataFrame(
[[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
tm.assert_frame_equal(rs, xp)
def test_to_csv_unicodewriter_quoting(self):
df = DataFrame({"A": [1, 2, 3], "B": ["foo", "bar", "baz"]})
buf = StringIO()
df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC, encoding="utf-8")
result = buf.getvalue()
expected_rows = ['"A","B"', '1,"foo"', '2,"bar"', '3,"baz"']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_quote_none(self):
# GH4328
df = DataFrame({"A": ["hello", '{"hello"}']})
for encoding in (None, "utf-8"):
buf = StringIO()
df.to_csv(buf, quoting=csv.QUOTE_NONE, encoding=encoding, index=False)
result = buf.getvalue()
expected_rows = ["A", "hello", '{"hello"}']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_index_no_leading_comma(self):
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["one", "two", "three"])
buf = StringIO()
df.to_csv(buf, index_label=False)
expected_rows = ["A,B", "one,1,4", "two,2,5", "three,3,6"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert buf.getvalue() == expected
def test_to_csv_line_terminators(self):
# see gh-20353
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["one", "two", "three"])
with tm.ensure_clean() as path:
# case 1: CRLF as line terminator
df.to_csv(path, line_terminator="\r\n")
expected = b",A,B\r\none,1,4\r\ntwo,2,5\r\nthree,3,6\r\n"
with open(path, mode="rb") as f:
assert f.read() == expected
with tm.ensure_clean() as path:
# case 2: LF as line terminator
df.to_csv(path, line_terminator="\n")
expected = b",A,B\none,1,4\ntwo,2,5\nthree,3,6\n"
with open(path, mode="rb") as f:
assert f.read() == expected
with tm.ensure_clean() as path:
            # case 3: the default line terminator (= os.linesep) (gh-21406)
df.to_csv(path)
os_linesep = os.linesep.encode("utf-8")
expected = (
b",A,B"
+ os_linesep
+ b"one,1,4"
+ os_linesep
+ b"two,2,5"
+ os_linesep
+ b"three,3,6"
+ os_linesep
)
with open(path, mode="rb") as f:
assert f.read() == expected
def test_to_csv_from_csv_categorical(self):
# CSV with categoricals should result in the same output
# as when one would add a "normal" Series/DataFrame.
s = Series(pd.Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]))
s2 = Series(["a", "b", "b", "a", "a", "c", "c", "c"])
res = StringIO()
s.to_csv(res, header=False)
exp = StringIO()
s2.to_csv(exp, header=False)
assert res.getvalue() == exp.getvalue()
df = DataFrame({"s": s})
df2 = DataFrame({"s": s2})
res = StringIO()
df.to_csv(res)
exp = StringIO()
df2.to_csv(exp)
assert res.getvalue() == exp.getvalue()
def test_to_csv_path_is_none(self, float_frame):
# GH 8215
# Make sure we return string for consistency with
# Series.to_csv()
csv_str = float_frame.to_csv(path_or_buf=None)
assert isinstance(csv_str, str)
recons = read_csv(StringIO(csv_str), index_col=0)
        tm.assert_frame_equal(float_frame, recons)
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Ridge, LinearRegression
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import TransformerMixin, BaseEstimator
import re
import scipy
from scipy import sparse
import gc
from sklearn.model_selection import train_test_split,KFold
from pprint import pprint
import warnings
import nltk
import string
from gensim.models import KeyedVectors, FastText
import emoji
from collections import Counter
from spacy.lang.en import English
nltk.download('stopwords')
from nltk.corpus import stopwords
warnings.filterwarnings("ignore")
pd.options.display.max_colwidth=300
from scipy.sparse import hstack
def splitter(text):
tokens = []
for word in text.split(' '):
tokens.append(word)
return tokens
def vectorizer(text,vec,fmodel):
tokens = splitter(text)
x1 = vec.transform([text]).toarray()
x2 = np.mean(fmodel.wv[tokens], axis = 0).reshape(1, -1)
x = np.concatenate([x1, x2], axis = -1).astype(np.float16)
return x
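# Hedged usage sketch (not part of the original pipeline): how `vectorizer` is
# expected to be wired together. The toy corpus, the TF-IDF settings and the
# FastText training call below (gensim >= 4 keyword names) are assumptions for
# illustration only; the real script loads a pre-trained 256-d FastText model.
def _example_vectorizer_usage(corpus):
    # Fit character n-gram TF-IDF on the corpus, train a small FastText model on
    # whitespace tokens, then build the concatenated feature row for one text.
    vec = TfidfVectorizer(analyzer='char_wb', ngram_range=(3, 5)).fit(corpus)
    fmodel = FastText(sentences=[splitter(t) for t in corpus], vector_size=256, min_count=1)
    return vectorizer(corpus[0], vec, fmodel)  # shape: (1, len(vec.vocabulary_) + 256)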
def encode_sentence(text, vocab2index, N=70):
def pre_process_text(text):
emoticons = [':-)', ':)', '(:', '(-:', ':))', '((:', ':-D', ':D', 'X-D', 'XD', 'xD', 'xD', '<3', '</3', ':\*',
';-)',
';)', ';-D', ';D', '(;', '(-;', ':-(', ':(', '(:', '(-:', ':,(', ':\'(', ':"(', ':((', ':D', '=D',
'=)',
'(=', '=(', ')=', '=-O', 'O-=', ':o', 'o:', 'O:', 'O:', ':-o', 'o-:', ':P', ':p', ':S', ':s', ':@',
':>',
':<', '^_^', '^.^', '>.>', 'T_T', 'T-T', '-.-', '*.*', '~.~', ':*', ':-*', 'xP', 'XP', 'XP', 'Xp',
':-|',
':->', ':-<', '$_$', '8-)', ':-P', ':-p', '=P', '=p', ':*)', '*-*', 'B-)', 'O.o', 'X-(', ')-X']
text = text.replace(".", " ").lower()
text = re.sub(r"[^a-zA-Z?.!,¿]+", " ", text)
users = re.findall("[@]\w+", text)
for user in users:
text = text.replace(user, "<user>")
urls = re.findall(r'(https?://[^\s]+)', text)
if len(urls) != 0:
for url in urls:
text = text.replace(url, "<url >")
for emo in text:
if emo in emoji.UNICODE_EMOJI:
text = text.replace(emo, "<emoticon >")
for emo in emoticons:
text = text.replace(emo, "<emoticon >")
numbers = re.findall('[0-9]+', text)
for number in numbers:
text = text.replace(number, "<number >")
text = text.replace('#', "<hashtag >")
text = re.sub(r"([?.!,¿])", r" ", text)
text = "".join(l for l in text if l not in string.punctuation)
text = re.sub(r'[" "]+', " ", text)
return text
tok=English()
def tokenize(text):
return [token.text for token in tok.tokenizer(pre_process_text(text))]
tokenized = tokenize(text)
encoded = np.zeros(N, dtype=int)
enc1 = np.array([vocab2index.get(word, vocab2index["UNK"]) for word in tokenized])
length = min(N, len(enc1))
encoded[:length] = enc1[:length]
return " ".join(map(str,encoded))
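# Hedged example (the vocabulary here is a toy assumption): encode_sentence
# pre-processes and tokenises a comment, maps tokens through vocab2index
# (falling back to the mandatory "UNK" entry), zero-pads to N ids and returns
# them as a space-joined string.
def _example_encode_sentence():
    vocab2index = {"UNK": 0, "this": 1, "is": 2, "toxic": 3}
    return encode_sentence("This is toxic!!!", vocab2index, N=10)
    # expected to yield something like "1 2 3 0 0 0 0 0 0 0"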
def create_k_folds_dataset(folds):
train=pd.read_csv(f'../input/jigsaw-toxic-comment-classification-challenge/train.csv')
    # insert the kfold column
train['kfold'] = -1
cat_mtpl = {'obscene': 0.16, 'toxic': 0.32, 'threat': 1.5,
'insult': 0.64, 'severe_toxic': 1.5, 'identity_hate': 1.5}
for category in cat_mtpl:
train[category] = train[category] * cat_mtpl[category]
train['score'] = train.loc[:, 'toxic':'identity_hate'].mean(axis=1)
train['y'] = train['score']
    # distribute the rows across the requested number of folds
    kfold = KFold(n_splits=folds, shuffle=True, random_state=42)
for fold, (tr_i,va_i) in enumerate(kfold.split(X=train)):
train.loc[va_i,'kfold'] = fold
    train.to_csv(f"../input/folds/train_folds_score_{folds}.csv", index=False)
print("successfully created folds")
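# Illustrative call (paths follow the Kaggle-style layout assumed above):
# create_k_folds_dataset(5) rewrites the training data with the weighted 'y'
# score and a 'kfold' assignment, saved as a csv under ../input/folds/.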
class PreProcessJigsawDataset(object):
def __init__(self,folds,path):
self.folds=folds
self.path=path
self.tf_idf_vec=TfidfVectorizer(min_df= 3, max_df=0.5, analyzer = 'char_wb', ngram_range = (3,5), max_features = 46000)
self.ft_vec=FastText.load('../model/jigsaw-regression-based-data/FastText-jigsaw-256D/Jigsaw-Fasttext-Word-Embeddings-256D.bin')
def create_vectorized_dataset(self,df):
X_list = []
self.tf_idf_vec.fit(df['text'])
for text in df.text:
X_list.append(vectorizer(text,self.tf_idf_vec,self.ft_vec))
EMB_DIM = len(self.tf_idf_vec.vocabulary_) + 256
X_np = np.array(X_list).reshape(-1, EMB_DIM)
X = pd.DataFrame(X_np)
return pd.concat([X,df['y']],axis=1)
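    # Note: the frame returned above has len(TF-IDF vocabulary) + 256 feature
    # columns (char n-gram TF-IDF concatenated with the mean FastText vector)
    # plus the target column 'y'.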
def create_jigsaw_classification_dataset_folds(self):
df = pd.read_csv(self.path+"/jigsaw-toxic-comment-classification-challenge/train.csv")
print(df.shape)
# Give more weight to severe toxic
df['severe_toxic'] = df.severe_toxic * 2
df['y'] = (df[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']].sum(axis=1) ).astype(int)
df['y'] = df['y']/df['y'].max()
df = df[['comment_text', 'y']].rename(columns={'comment_text': 'text'})
df=self.create_vectorized_dataset(df)
print(df.shape)
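        # Per fold: sample 30% of the positive (y > 0) rows plus 1.2x that many
        # zero-score rows, then shuffle the combined subset before saving.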
frac_1 = 0.3
frac_1_factor = 1.2
for fld in range(self.folds):
print(f'Fold: {fld}')
tmp_df = pd.concat([df[df.y>0].sample(frac=frac_1, random_state = 10*(fld+1)) ,
df[df.y==0].sample(n=int(len(df[df.y>0])*frac_1*frac_1_factor) ,
random_state = 10*(fld+1))], axis=0).sample(frac=1, random_state = 10*(fld+1))
tmp_df.to_csv(f'{self.path}/folds/df_fld{fld}.csv', index=False)
print(tmp_df.shape)
print(tmp_df['y'].value_counts())
def create_jigsaw_clean_classification_dataset_folds(self):
stop = stopwords.words('english')
def clean(data, col):
data[col] = data[col].str.replace(r"what's", "what is ")
data[col] = data[col].str.replace(r"\'ve", " have ")
data[col] = data[col].str.replace(r"can't", "cannot ")
data[col] = data[col].str.replace(r"n't", " not ")
data[col] = data[col].str.replace(r"i'm", "i am ")
data[col] = data[col].str.replace(r"\'re", " are ")
data[col] = data[col].str.replace(r"\'d", " would ")
data[col] = data[col].str.replace(r"\'ll", " will ")
data[col] = data[col].str.replace(r"\'scuse", " excuse ")
data[col] = data[col].str.replace(r"\'s", " ")
            # Clean some punctuation
data[col] = data[col].str.replace('\n', ' \n ')
data[col] = data[col].str.replace(r'([a-zA-Z]+)([/!?.])([a-zA-Z]+)',r'\1 \2 \3')
# Replace repeating characters more than 3 times to length of 3
data[col] = data[col].str.replace(r'([*!?\'])\1\1{2,}',r'\1\1\1')
# Add space around repeating characters
data[col] = data[col].str.replace(r'([*!?\']+)',r' \1 ')
            # Collapse letters repeated 3+ times at a word end to 2, and 4+ times mid-word to 3
data[col] = data[col].str.replace(r'([a-zA-Z])\1{2,}\b',r'\1\1')
data[col] = data[col].str.replace(r'([a-zA-Z])\1\1{2,}\B',r'\1\1\1')
data[col] = data[col].str.replace(r'[ ]{2,}',' ').str.strip()
data[col] = data[col].str.replace(r'[ ]{2,}',' ').str.strip()
data[col] = data[col].apply(lambda x: ' '.join([word for word in x.split() if word not in (stop)]))
return data
df = pd.read_csv(self.path+"/jigsaw-toxic-comment-classification-challenge/train.csv")
print(df.shape)
df['severe_toxic'] = df.severe_toxic * 2
df['y'] = (df[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']].sum(axis=1) ).astype(int)
df['y'] = df['y']/df['y'].max()
df = df[['comment_text', 'y']].rename(columns={'comment_text': 'text'})
df = clean(df,'text')
frac_1 = 0.3
frac_1_factor = 1.2
for fld in range(self.folds):
print(f'Fold: {fld}')
tmp_df = pd.concat([df[df.y>0].sample(frac=frac_1, random_state = 10*(fld+1)) ,
df[df.y==0].sample(n=int(len(df[df.y>0])*frac_1*frac_1_factor) ,
random_state = 10*(fld+1))], axis=0).sample(frac=1, random_state = 10*(fld+1))
tmp_df.to_csv(f'{self.path}/folds/df_clean_fld{fld}.csv', index=False)
print(tmp_df.shape)
print(tmp_df['y'].value_counts())
def create_ruddit_dataset_folds(self):
        df_ = pd.read_csv(self.path+"/ruddit-jigsaw-dataset/Dataset/ruddit_with_text.csv")
# -*- coding: utf-8 -*-
"""
Authors: <NAME>
UNESCO-IHE 2016
Contact: <EMAIL>
Repository: https://github.com/wateraccounting/wa
Module: Sheets/sheet3
"""
import os
import pandas as pd
import time
import xml.etree.ElementTree as ET
import subprocess
def create_sheet3(basin, period, units, data, output, template=False):
"""
Keyword arguments:
basin -- The name of the basin
period -- The period of analysis
units -- A list with the units of the data:
[<water consumption>, <land productivity>, <water productivity>]
data -- A csv file that contains the water data. The csv file has to
            follow a specific format. A sample csv is available at:
https://github.com/wateraccounting/wa/tree/master/Sheets/csv
output -- A list (length 2) with the output paths of the jpg files
for the two parts of the sheet
template -- A list (length 2) of the svg files of the sheet.
Use False (default) to use the standard svg files.
Example:
from wa.Sheets import *
create_sheet3(basin='Helmand', period='2007-2011',
units=['km3/yr', 'kg/ha/yr', 'kg/m3'],
data=[r'C:\Sheets\csv\Sample_sheet3_part1.csv',
r'C:\Sheets\csv\Sample_sheet3_part2.csv'],
output=[r'C:\Sheets\sheet_3_part1.jpg',
r'C:\Sheets\sheet_3_part2.jpg'])
"""
# Read table
df1 = pd.read_csv(data[0], sep=';')
df2 = pd.read_csv(data[1], sep=';')
# Data frames
df1c = df1.loc[df1.USE == "CROP"]
df1n = df1.loc[df1.USE == "NON-CROP"]
df2c = df2.loc[df2.USE == "CROP"]
df2n = df2.loc[df2.USE == "NON-CROP"]
# Read csv file part 1
crop_r01c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c01 = crop_r02c01 + crop_r03c01
crop_r01c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c02 = crop_r02c02 + crop_r03c02
crop_r01c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c03 = crop_r02c03 + crop_r03c03
crop_r01c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c04 = crop_r02c04 + crop_r03c04
crop_r01c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r02c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "ET rainfall") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r03c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "Incremental ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r04c05 = crop_r02c05 + crop_r03c05
crop_r01c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c06 = crop_r02c06 + crop_r03c06
crop_r01c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c07 = crop_r02c07 + crop_r03c07
crop_r01c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r02c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "ET rainfall") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r03c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "Incremental ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r04c08 = crop_r02c08 + crop_r03c08
crop_r01c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c09 = crop_r02c09 + crop_r03c09
crop_r01c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c10 = crop_r02c10 + crop_r03c10
crop_r01c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c11 = crop_r02c11 + crop_r03c11
crop_r01c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c12 = crop_r02c12 + crop_r03c12
noncrop_r01c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "ET")].WATER_CONSUMPTION)
noncrop_r02c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
noncrop_r03c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
noncrop_r04c01 = noncrop_r02c01 + noncrop_r03c01
noncrop_r01c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "ET")].WATER_CONSUMPTION)
noncrop_r02c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
noncrop_r03c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
noncrop_r04c02 = noncrop_r02c02 + noncrop_r03c02
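    # Row semantics for part 1: r01 = ET, r02 = ET rainfall, r03 = incremental ET,
    # r04 = r02 + r03; the totals below sum each row over the crop classes
    # (c01-c12) and the non-crop classes (c01-c02).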
crop_r01 = pd.np.nansum([crop_r01c01, crop_r01c02, crop_r01c03,
crop_r01c04, crop_r01c05, crop_r01c06,
crop_r01c07, crop_r01c08, crop_r01c09,
crop_r01c10, crop_r01c11, crop_r01c12])
crop_r02 = pd.np.nansum([crop_r02c01, crop_r02c02, crop_r02c03,
crop_r02c04, crop_r02c05, crop_r02c06,
crop_r02c07, crop_r02c08, crop_r02c09,
crop_r02c10, crop_r02c11, crop_r02c12])
crop_r03 = pd.np.nansum([crop_r03c01, crop_r03c02, crop_r03c03,
crop_r03c04, crop_r03c05, crop_r03c06,
crop_r03c07, crop_r03c08, crop_r03c09,
crop_r03c10, crop_r03c11, crop_r03c12])
crop_r04 = crop_r02 + crop_r03
noncrop_r01 = pd.np.nansum([noncrop_r01c01, noncrop_r01c02])
noncrop_r02 = pd.np.nansum([noncrop_r02c01, noncrop_r02c02])
noncrop_r03 = pd.np.nansum([noncrop_r03c01, noncrop_r03c02])
noncrop_r04 = noncrop_r02 + noncrop_r03
ag_water_cons = crop_r01 + crop_r04 + noncrop_r01 + noncrop_r04
# Read csv file part 2
# Land productivity
lp_r01c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r02c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r03c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r04c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r01c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r02c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r03c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r04c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r01c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
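    # Rows r05-r08 repeat the Yield / Yield rainfall / Incremental yield / Total
    # yield breakdown for the non-crop categories (meat, milk, aquaculture, timber).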
lp_r05c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
# Water productivity
wp_r01c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r02c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r03c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r04c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r01c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r02c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r03c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r04c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r01c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
# Calculations & modify svgs
if not template:
path = os.path.dirname(os.path.abspath(__file__))
svg_template_path_1 = os.path.join(path, 'svg', 'sheet_3_part1.svg')
svg_template_path_2 = os.path.join(path, 'svg', 'sheet_3_part2.svg')
else:
svg_template_path_1 = os.path.abspath(template[0])
svg_template_path_2 = os.path.abspath(template[1])
tree1 = ET.parse(svg_template_path_1)
tree2 = ET.parse(svg_template_path_2)
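    # Hedged refactoring sketch (defined here for clarity, not used below): each of
    # the blocks that follow locates an svg text element by id and writes either a
    # formatted number or '-' when the value is missing. A helper like this captures
    # that pattern; the original code spells it out cell by cell.
    def _fill(tree, element_id, value, fmt='%.2f'):
        box = tree.findall(".//*[@id='%s']" % element_id)[0]
        box.getchildren()[0].text = fmt % value if not pd.isnull(value) else '-'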
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Titles
xml_txt_box = tree1.findall('''.//*[@id='basin']''')[0]
xml_txt_box.getchildren()[0].text = 'Basin: ' + basin
xml_txt_box = tree1.findall('''.//*[@id='period']''')[0]
xml_txt_box.getchildren()[0].text = 'Period: ' + period
xml_txt_box = tree1.findall('''.//*[@id='units']''')[0]
xml_txt_box.getchildren()[0].text = 'Part 1: Agricultural water consumption (' + units[0] + ')'
xml_txt_box = tree2.findall('''.//*[@id='basin2']''')[0]
xml_txt_box.getchildren()[0].text = 'Basin: ' + basin
xml_txt_box = tree2.findall('''.//*[@id='period2']''')[0]
xml_txt_box.getchildren()[0].text = 'Period: ' + period
xml_txt_box = tree2.findall('''.//*[@id='units2']''')[0]
xml_txt_box.getchildren()[0].text = 'Part 2: Land productivity (' + units[1] + ') and water productivity (' + units[2] + ')'
# Part 1
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c01']''')[0]
if not pd.isnull(crop_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c02']''')[0]
if not pd.isnull(crop_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c03']''')[0]
if not pd.isnull(crop_r01c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c04']''')[0]
if not pd.isnull(crop_r01c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c05']''')[0]
if not pd.isnull(crop_r01c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c06']''')[0]
if not pd.isnull(crop_r01c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c07']''')[0]
if not pd.isnull(crop_r01c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c08']''')[0]
if not pd.isnull(crop_r01c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c09']''')[0]
if not pd.isnull(crop_r01c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c10']''')[0]
if not pd.isnull(crop_r01c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c11']''')[0]
if not pd.isnull(crop_r01c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c12']''')[0]
if not pd.isnull(crop_r01c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01']''')[0]
if not pd.isnull(crop_r01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c01']''')[0]
if not pd.isnull(crop_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c02']''')[0]
if not pd.isnull(crop_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c03']''')[0]
if not pd.isnull(crop_r02c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c04']''')[0]
if not pd.isnull(crop_r02c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c05']''')[0]
if not pd.isnull(crop_r02c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c06']''')[0]
if not pd.isnull(crop_r02c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c07']''')[0]
if not pd.isnull(crop_r02c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c08']''')[0]
if not pd.isnull(crop_r02c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c09']''')[0]
if not pd.isnull(crop_r02c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c10']''')[0]
if not pd.isnull(crop_r02c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c11']''')[0]
if not pd.isnull(crop_r02c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c12']''')[0]
if not pd.isnull(crop_r02c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02']''')[0]
if not pd.isnull(crop_r02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c01']''')[0]
if not pd.isnull(crop_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c02']''')[0]
if not pd.isnull(crop_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c03']''')[0]
if not pd.isnull(crop_r03c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c04']''')[0]
if not pd.isnull(crop_r03c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c05']''')[0]
if not pd.isnull(crop_r03c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c06']''')[0]
if not pd.isnull(crop_r03c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c07']''')[0]
if not pd.isnull(crop_r03c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c08']''')[0]
if not pd.isnull(crop_r03c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c09']''')[0]
if not pd.isnull(crop_r03c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c10']''')[0]
if not pd.isnull(crop_r03c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c11']''')[0]
if not pd.isnull(crop_r03c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c12']''')[0]
if not pd.isnull(crop_r03c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03']''')[0]
if not pd.isnull(crop_r03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c01']''')[0]
if not pd.isnull(crop_r04c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c02']''')[0]
if not pd.isnull(crop_r04c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c03']''')[0]
if not pd.isnull(crop_r04c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c04']''')[0]
if not pd.isnull(crop_r04c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c05']''')[0]
if not pd.isnull(crop_r04c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c06']''')[0]
if not pd.isnull(crop_r04c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c07']''')[0]
if not pd.isnull(crop_r04c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c08']''')[0]
if not pd.isnull(crop_r04c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c09']''')[0]
if not pd.isnull(crop_r04c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c10']''')[0]
if not pd.isnull(crop_r04c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c11']''')[0]
if not pd.isnull(crop_r04c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c12']''')[0]
if not pd.isnull(crop_r04c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04']''')[0]
if not pd.isnull(crop_r04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01c01']''')[0]
if not pd.isnull(noncrop_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01c02']''')[0]
if not pd.isnull(noncrop_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01']''')[0]
if not pd.isnull(noncrop_r01) and noncrop_r01 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02c01']''')[0]
if not pd.isnull(noncrop_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02c02']''')[0]
if not pd.isnull(noncrop_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02']''')[0]
if not pd.isnull(noncrop_r02) and noncrop_r02 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03c01']''')[0]
if not pd.isnull(noncrop_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03c02']''')[0]
if not pd.isnull(noncrop_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03']''')[0]
if not pd.isnull(noncrop_r03) and noncrop_r03 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04c01']''')[0]
if not pd.isnull(noncrop_r04c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04c02']''')[0]
if not pd.isnull(noncrop_r04c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04']''')[0]
if not pd.isnull(noncrop_r04) and noncrop_r04 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04
else:
xml_txt_box.getchildren()[0].text = '-'
# Part 2
xml_txt_box = tree1.findall('''.//*[@id='ag_water_cons']''')[0]
if not pd.isnull(ag_water_cons):
xml_txt_box.getchildren()[0].text = '%.2f' % ag_water_cons
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c01']''')[0]
if not pd.isnull(lp_r01c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c02']''')[0]
if not pd.isnull(lp_r01c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c03']''')[0]
if not pd.isnull(lp_r01c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c04']''')[0]
if not pd.isnull(lp_r01c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c05']''')[0]
if not pd.isnull(lp_r01c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c06']''')[0]
if not pd.isnull(lp_r01c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c07']''')[0]
if not pd.isnull(lp_r01c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c08']''')[0]
if not pd.isnull(lp_r01c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c09']''')[0]
if not pd.isnull(lp_r01c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c10']''')[0]
if not pd.isnull(lp_r01c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c11']''')[0]
if not pd.isnull(lp_r01c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c12']''')[0]
if not pd.isnull(lp_r01c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c01']''')[0]
if not pd.isnull(lp_r02c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c02']''')[0]
if not pd.isnull(lp_r02c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c03']''')[0]
if not pd.isnull(lp_r02c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c04']''')[0]
if not pd.isnull(lp_r02c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c05']''')[0]
if not pd.isnull(lp_r02c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c06']''')[0]
if not pd.isnull(lp_r02c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c07']''')[0]
if not pd.isnull(lp_r02c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c08']''')[0]
if not pd.isnull(lp_r02c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c09']''')[0]
if not pd.isnull(lp_r02c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c10']''')[0]
if not pd.isnull(lp_r02c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c11']''')[0]
if not pd.isnull(lp_r02c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c12']''')[0]
if not pd.isnull(lp_r02c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c01']''')[0]
if not pd.isnull(lp_r03c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c02']''')[0]
if not pd.isnull(lp_r03c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c03']''')[0]
if not pd.isnull(lp_r03c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c04']''')[0]
if not pd.isnull(lp_r03c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c05']''')[0]
if not pd.isnull(lp_r03c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c06']''')[0]
if not pd.isnull(lp_r03c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c07']''')[0]
if not pd.isnull(lp_r03c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c08']''')[0]
if not pd.isnull(lp_r03c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c09']''')[0]
if not pd.isnull(lp_r03c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c10']''')[0]
if not pd.isnull(lp_r03c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c11']''')[0]
if not pd.isnull(lp_r03c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c12']''')[0]
if not pd.isnull(lp_r03c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c01']''')[0]
if not pd.isnull(lp_r04c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c02']''')[0]
if not pd.isnull(lp_r04c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c03']''')[0]
if not pd.isnull(lp_r04c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c04']''')[0]
if not pd.isnull(lp_r04c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c05']''')[0]
if not pd.isnull(lp_r04c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c06']''')[0]
if not pd.isnull(lp_r04c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c07']''')[0]
if not pd.isnull(lp_r04c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c08']''')[0]
if not pd.isnull(lp_r04c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c09']''')[0]
if not pd.isnull(lp_r04c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c10']''')[0]
if not pd.isnull(lp_r04c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c11']''')[0]
if not pd.isnull(lp_r04c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c12']''')[0]
if not pd.isnull(lp_r04c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c01']''')[0]
if not pd.isnull(wp_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c02']''')[0]
if not pd.isnull(wp_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c03']''')[0]
if not pd.isnull(wp_r01c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c04']''')[0]
if not pd.isnull(wp_r01c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c05']''')[0]
if not pd.isnull(wp_r01c05):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c06']''')[0]
if not pd.isnull(wp_r01c06):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c07']''')[0]
if not pd.isnull(wp_r01c07):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c08']''')[0]
if not pd.isnull(wp_r01c08):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c09']''')[0]
if not pd.isnull(wp_r01c09):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c10']''')[0]
if not pd.isnull(wp_r01c10):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c11']''')[0]
if not pd.isnull(wp_r01c11):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c12']''')[0]
if not pd.isnull(wp_r01c12):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c01']''')[0]
if not pd.isnull(wp_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c02']''')[0]
if not pd.isnull(wp_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c03']''')[0]
if not pd.isnull(wp_r02c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c04']''')[0]
if not pd.isnull(wp_r02c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c05']''')[0]
if not pd.isnull(wp_r02c05):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c06']''')[0]
if not pd.isnull(wp_r02c06):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c07']''')[0]
if not pd.isnull(wp_r02c07):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c08']''')[0]
if not pd.isnull(wp_r02c08):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c09']''')[0]
if not pd.isnull(wp_r02c09):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c10']''')[0]
if not pd.isnull(wp_r02c10):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c11']''')[0]
if not pd.isnull(wp_r02c11):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c12']''')[0]
if not pd.isnull(wp_r02c12):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c01']''')[0]
if not pd.isnull(wp_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c02']''')[0]
if not pd.isnull(wp_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c03']''')[0]
if not pd.isnull(wp_r03c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c04']''')[0]
if not pd.isnull(wp_r03c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c05']''')[0]
if not pd.isnull(wp_r03c05):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c06']''')[0]
if not pd.isnull(wp_r03c06):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c07']''')[0]
if not pd.isnull(wp_r03c07):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c08']''')[0]
if not pd.isnull(wp_r03c08):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c09']''')[0]
if not pd.isnull(wp_r03c09):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c10']''')[0]
if not pd.isnull(wp_r03c10):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c11']''')[0]
if not pd.isnull(wp_r03c11):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c12']''')[0]
if not pd.isnull(wp_r03c12):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c01']''')[0]
if not pd.isnull(wp_r04c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c02']''')[0]
if not pd.isnull(wp_r04c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c03']''')[0]
if not pd.isnull(wp_r04c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c04']''')[0]
if not pd.isnull(wp_r04c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c05']''')[0]
if not pd.isnull(wp_r04c05):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c06']''')[0]
if not pd.isnull(wp_r04c06):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c07']''')[0]
if not pd.isnull(wp_r04c07):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c08']''')[0]
if not pd.isnull(wp_r04c08):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c09']''')[0]
if not pd.isnull(wp_r04c09):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c10']''')[0]
if not pd.isnull(wp_r04c10):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c11']''')[0]
if not pd.isnull(wp_r04c11):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c12']''')[0]
if not pd.isnull(wp_r04c12):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r05c01']''')[0]
if not pd.isnull(lp_r05c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r05c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r05c02']''')[0]
if not pd.isnull(lp_r05c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r05c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r05c03']''')[0]
if not pd.isnull(lp_r05c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r05c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r05c04']''')[0]
if not pd.isnull(lp_r05c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r05c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r06c01']''')[0]
if not pd.isnull(lp_r06c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r06c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r06c02']''')[0]
if not pd.isnull(lp_r06c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r06c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r06c03']''')[0]
if not pd.isnull(lp_r06c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r06c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r06c04']''')[0]
if not pd.isnull(lp_r06c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r06c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r07c01']''')[0]
if not pd.isnull(lp_r07c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r07c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r07c02']''')[0]
if not pd.isnull(lp_r07c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r07c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r07c03']''')[0]
if not pd.isnull(lp_r07c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r07c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r07c04']''')[0]
if not pd.isnull(lp_r07c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r07c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r08c01']''')[0]
if not pd.isnull(lp_r08c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r08c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r08c02']''')[0]
if not pd.isnull(lp_r08c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r08c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r08c03']''')[0]
if not pd.isnull(lp_r08c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r08c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r08c04']''')[0]
if not pd.isnull(lp_r08c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r08c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r05c01']''')[0]
if not pd.isnull(wp_r05c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r05c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r05c02']''')[0]
if not pd.isnull(wp_r05c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r05c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r05c03']''')[0]
if not pd.isnull(wp_r05c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r05c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r05c04']''')[0]
if not pd.isnull(wp_r05c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r05c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r06c01']''')[0]
if not pd.isnull(wp_r06c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r06c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r06c02']''')[0]
if not pd.isnull(wp_r06c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r06c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r06c03']''')[0]
if not pd.isnull(wp_r06c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r06c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r06c04']''')[0]
if not pd.isnull(wp_r06c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r06c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r07c01']''')[0]
if not pd.isnull(wp_r07c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r07c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r07c02']''')[0]
if not pd.isnull(wp_r07c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r07c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r07c03']''')[0]
if not pd.isnull(wp_r07c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r07c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r07c04']''')[0]
if not pd.isnull(wp_r07c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r07c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r08c01']''')[0]
if not pd.isnull(wp_r08c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r08c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r08c02']''')[0]
if not pd.isnull(wp_r08c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r08c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r08c03']''')[0]
if not pd.isnull(wp_r08c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r08c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r08c04']''')[0]
if not pd.isnull(wp_r08c04):
    xml_txt_box.getchildren()[0].text = '%.2f' % wp_r08c04
else:
    xml_txt_box.getchildren()[0].text = '-'
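# The block above repeats a single pattern per table cell: look up an SVG text
# element by id and write either a formatted number or a dash. A hedged sketch of
# a helper that captures that pattern (the trees and element ids are the ones used
# above; the function name and default format are hypothetical):
def fill_text_box(tree, element_id, value, fmt='%.2f'):
    # Locate the text element by id and write into its first child (the tspan).
    box = tree.findall(".//*[@id='%s']" % element_id)[0]
    box.getchildren()[0].text = fmt % value if not pd.isnull(value) else '-'

# Example: fill_text_box(tree2, 'wp_r08c04', wp_r08c04) reproduces the last block,
# and fill_text_box(tree2, 'lp_r08c04', lp_r08c04, fmt='%.0f') the lp_* variant.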
# Run from here at the start to get hold of the required functions and dataframes
import Functions
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
coin_list_NA = ['BTC', 'BCHNA', 'CardonaNA', 'dogecoinNA', 'EOS_RNA', 'ETHNA', 'LTCNA', 'XRP_RNA', 'MoneroNA',
'BNB_RNA',
'IOTANA', 'TEZOSNA', ]
coin_list = ['BTC', 'BCH', 'Cardona', 'dogecoin', 'EOS', 'ETH', 'LTC', 'XRP', 'Monero', 'BNB', 'IOTA', 'TEZOS', ]
dfAllCoins = pd.DataFrame()
dfWMR = pd.read_csv('Data/' + coin_list[0] + '_marketdata.csv', sep=';', thousands=',', decimal='.')
dfWMR['Date'] = pd.to_datetime(dfWMR['Date'], format='%b %d, %Y')
dfWMR['Date'] = pd.DatetimeIndex(dfWMR['Date']).date
dfWMR.index = dfWMR['Date']
dfWMR = dfWMR.sort_index()
for column in dfWMR.columns:
dfWMR = dfWMR.drop(columns=column)
dfPrices = dfWMR
dfReturns = dfWMR
dfMarketCap = dfWMR
dfPositive = dfWMR
dfNeutral = dfWMR
dfNegative = dfWMR
dfMOM3 = dfWMR
dfMOM5 = dfWMR
dfMOM7 = dfWMR
dfMOM14 = dfWMR
for i in range(0, len(coin_list)):
dfMarket = pd.read_csv('Data/' + coin_list[i] + '_marketdata.csv', sep=';', thousands=',', decimal='.')
dfMarket['Date'] = pd.to_datetime(dfMarket['Date'], format='%b %d, %Y')
dfMarket['Date'] = pd.DatetimeIndex(dfMarket['Date']).date
dfMarket.index = dfMarket['Date']
dfMarket = dfMarket.sort_index()
dfMarket['Return'] = dfMarket['Close**'].pct_change()
dfMarket = dfMarket[1:]
dfMarket['Mom3'] = dfMarket.Return.rolling(3).sum()
dfMarket['Mom5'] = dfMarket.Return.rolling(5).sum()
dfMarket['Mom7'] = dfMarket.Return.rolling(7).sum()
dfMarket['Mom14'] = dfMarket.Return.rolling(14).sum()
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Return']
dfReturns = dfReturns.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Close**']
dfPrices = dfPrices.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom3']
dfMOM3 = dfMOM3.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom5']
dfMOM5 = dfMOM5.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom7']
dfMOM7 = dfMOM7.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom14']
dfMOM14 = dfMOM14.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Market Cap']
dfTemp[coin_list[i]] = dfTemp[coin_list[i]].fillna(method = 'ffill')
dfMarketCap = dfMarketCap.merge(dfTemp, how='left', left_index=True, right_index=True)
dfSentiment = pd.read_csv('Data/' + coin_list_NA[i] + '_Actual_Sentiment.csv', index_col=0, sep=',')
if coin_list[i] == 'BTC':
# dfSentiment = pd.read_csv('Data/' + coin_list_NA[i] + '_Actual_Sentiment.csv', index_col=0, sep=';')
dfSentiment = pd.read_csv('Data/All_Merged.csv', index_col=0, sep=',')
dfSentiment = dfSentiment[['positive_comment', 'neutral_comment', 'negative_comment']]
dfSentiment['Date'] = dfSentiment.index
dfSentiment['Date'] = pd.to_datetime(dfSentiment['Date'])
dfSentiment.index = pd.DatetimeIndex(dfSentiment['Date']).date
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfSentiment['positive_comment']
dfPositive = dfPositive.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfSentiment['negative_comment']
dfNegative = dfNegative.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfSentiment['neutral_comment']
dfNeutral = dfNeutral.merge(dfTemp, how='left', left_index=True, right_index=True)
dfMarket['Coin'] = coin_list[i]
del dfSentiment['Date']
dfData = dfMarket.merge(dfSentiment, how='inner', left_index=True, right_index=True)
dfData = dfData.reset_index()
del dfData['index']
dfAllCoins = dfAllCoins.append(dfData)
dfAllCoins = dfAllCoins.drop(['created_utc'], axis=1)
dfWMR = pd.DataFrame()
dfReturnsLag = dfReturns.iloc[1:,:]
dfMarketCapLag = dfMarketCap.iloc[:-1,:]
dfMarketCapLag.index = dfReturnsLag.index
dfWMR['WMR'] = dfReturnsLag.multiply(dfMarketCapLag).sum(axis=1) / dfMarketCapLag.sum(axis=1)
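# dfWMR['WMR'] above is a value-weighted market return: each day's coin returns are
# averaged using the previous day's market caps as weights,
# WMR_t = sum_i(r_i,t * mcap_i,t-1) / sum_i(mcap_i,t-1).
# A tiny hedged sanity check with two made-up coins on a single day:
_chk_ret = pd.Series({'coinA': 0.02, 'coinB': -0.01})
_chk_cap = pd.Series({'coinA': 300.0, 'coinB': 100.0})
assert abs(_chk_ret.multiply(_chk_cap).sum() / _chk_cap.sum() - 0.0125) < 1e-12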
dfPositiveSentimentSignal = pd.DataFrame()
## Simulate trial balancing to show it works
"""
S3B, right; S3C
PLOT_SIMULATED_DATA_AND_ACCURACY
N/A
Plot simulated trial balancing data and accuracy
"""
import numpy as np
import pandas
import matplotlib.pyplot as plt
import my.plot
import sklearn.linear_model
np.random.seed(0)
my.plot.manuscript_defaults()
my.plot.font_embed()
## Simulate some data
# This sets the ratio of correct to incorrect
correct_ratio = 2
n_incorrect = 30
trial_params = pandas.DataFrame.from_dict({
'stimulus': ['A', 'A', 'B', 'B'],
'choice': ['A', 'B', 'A', 'B'],
'n_trials': [
n_incorrect * correct_ratio, n_incorrect,
n_incorrect, n_incorrect * correct_ratio,
],
}).set_index(['stimulus', 'choice'])
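# With correct_ratio = 2 and n_incorrect = 30 this yields 60/30/30/60 trials for the
# four stimulus x choice cells (180 in total), i.e. twice as many correct as
# incorrect trials.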
# Generate trial_matrix
res_l = []
for stimulus, choice in trial_params.index:
n_trials = trial_params.loc[(stimulus, choice), 'n_trials']
res = pandas.DataFrame(
[(stimulus, choice)] * n_trials, columns=['stimulus', 'choice'])
res_l.append(res)
trial_matrix = pandas.concat(res_l, ignore_index=True)
trial_matrix.index.name = 'trial'
# Designate folds, should be okay to alternate since trial types are
# slowly varying
trial_matrix['set'] = np.mod(trial_matrix.index, 2)
trial_matrix['set'] = trial_matrix['set'].replace(
{0: 'train', 1: 'test'})
# Designate weight
trial_matrix['mouse_correct'] = trial_matrix['stimulus'] == trial_matrix['choice']
trial_matrix['weight'] = trial_matrix['mouse_correct'].replace(
{True: 1, False: correct_ratio})
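# Incorrect trials are half as frequent by construction, so they are up-weighted by
# correct_ratio: 120 correct trials x weight 1 and 60 incorrect trials x weight 2
# contribute equally to the 'balanced' fit below.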
## Define neuron weights
# The simulation params
# stim_choice_ratio: 0 = purely choice, 1 = purely stim
# noise_ratio: 0 = all signal (stim and choice combined); 1 = all noise
# Could also add a "gain" parameter, but this likely doesn't matter
neuron_weights = pandas.MultiIndex.from_product([
pandas.Series(np.array([.2, .4, .6]), name='noise_ratio'),
pandas.Series(np.linspace(0, 1, 5), name='stim_choice_ratio'),
]).to_frame().reset_index(drop=True)
# The actual weights of noise, stim, and choice
# These are constrained to add to 1
neuron_weights['noise_weight'] = neuron_weights['noise_ratio']
neuron_weights['stim_weight'] = (
(1 - neuron_weights['noise_weight']) * neuron_weights['stim_choice_ratio'])
neuron_weights['choice_weight'] = (
1 - neuron_weights['noise_weight'] - neuron_weights['stim_weight'])
neuron_weights.index.name = 'neuron'
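# By construction the noise, stimulus and choice contributions partition each
# simulated neuron's response; a quick check (sketch) makes that invariant explicit:
assert np.allclose(
    neuron_weights[['noise_weight', 'stim_weight', 'choice_weight']].sum(axis=1), 1.0)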
## Iterate over repetitions
# Store data here
all_neuron_preds_l = []
all_neuron_coefs_l = []
all_neuron_preds_keys_l = []
response_data_l = []
response_data_keys_l = []
# This needs to be somewhat high because of the coin flip issue
n_repetitions = 50
# Iterate over repetitions
for n_repetition in range(n_repetitions):
## Generate data
# Merge trial matrix and neuron weights
to_merge1 = trial_matrix.reset_index()
to_merge2 = neuron_weights.reset_index()
to_merge1['key'] = 0
to_merge2['key'] = 0
merged = pandas.merge(to_merge1, to_merge2, on='key', how='outer')
# Draw
merged['response'] = (
-merged['stim_weight'] * (merged['stimulus'] == 'A').astype(np.int) +
merged['stim_weight'] * (merged['stimulus'] == 'B').astype(np.int) +
-merged['choice_weight'] * (merged['choice'] == 'A').astype(np.int) +
merged['choice_weight'] * (merged['choice'] == 'B').astype(np.int)
)
merged['response'] += (
merged['noise_weight'] * np.random.standard_normal(len(merged)))
# Drop the metadata and include only the responses
response_data = merged.set_index(
['neuron', 'trial'])['response'].unstack('neuron')
## Implement trial dropping, consistently for all decodes and neurons
# Do only on the train
train_trial_matrix = trial_matrix[trial_matrix['set'] == 'train'].copy()
test_trial_matrix = trial_matrix[trial_matrix['set'] == 'test'].copy()
# Size of the smallest group
gobj = train_trial_matrix.groupby(['stimulus', 'choice'])
dropped_n = gobj.size().min()
# Choose
dropped_sub_tm_l = []
for keys, sub_tm in gobj:
# Just take the first N because there's no temporal ordering here
dropped_sub_tm = sub_tm.iloc[:dropped_n]
dropped_sub_tm_l.append(dropped_sub_tm)
# Append test
dropped_sub_tm_l.append(test_trial_matrix)
# Generate new dropped_tm
dropped_tm = pandas.concat(dropped_sub_tm_l).sort_index()
## Iterate first over how to decode, then over neurons
for decode_label in ['stimulus', 'choice']:
for fit_method in ['naive', 'balanced', 'trial dropping']:
## Iterate over neurons
preds_l = []
coefs_l = []
preds_keys_l = []
for neuron in neuron_weights.index:
# Get neuron_data
neuron_data = response_data.loc[:, neuron]
if fit_method == 'trial dropping':
# Join on trial matrix
neuron_data = dropped_tm.join(neuron_data)
else:
# Join on trial matrix
neuron_data = trial_matrix.join(neuron_data)
assert len(neuron_data) == response_data.shape[0]
# Split
train_data = neuron_data[neuron_data['set'] == 'train']
test_data = neuron_data[neuron_data['set'] == 'test']
## Decode
# Set up model
# With only one feature, all this can do is threshold
# And by design we know the best threshold (intercept) is always zero
# So this could be done simply by assessing datapoints wrt zero
model = sklearn.linear_model.LogisticRegression(
fit_intercept=False, C=1.0,
)
# Fit
if fit_method in ['naive', 'trial dropping']:
model.fit(
train_data.loc[:, [neuron]].values,
train_data[decode_label].values,
)
elif fit_method == 'balanced':
model.fit(
train_data.loc[:, [neuron]].values,
train_data[decode_label].values,
sample_weight=train_data.loc[:, 'weight'].values,
)
else:
1/0
# Predict
preds = model.predict(
test_data.loc[:, [neuron]].values,
)
preds = pandas.Series(preds, index=test_data.index, name='pred')
"""
- Takes a device-related folder as input, or anyway a folder containing all MUD rejected-traffic pcaps
- Generates all flow-files per-pcap
- Derives comprehensive NetFlow CSV per-pcap
- Labels the CSV according to the IEEE IoT NIDS dataset, EZVIZ ground truth
- Merges all CSVs together
Uses custom bash scripts
"""
import sys
import csv
import argparse
import ipaddress
import json
import os
import socket
import subprocess
from pathlib import Path
import numpy as np
import pandas as pd
# Remove directories without errors, from https://stackoverflow.com/questions/31977833/rm-all-files-under-a-directory-using-python-subprocess-call
import shutil
import requests
import time
cwd = os.getcwd()
sys.path.insert(1, cwd + '/src/')
sys.path.insert(2, cwd + '/src/auto-scripts')
#from CsvProcessingUtilsIEEE import *
from Constants import *
debug = False
BASH_AUTO_PCAP_TO_FLOWS = BASE_DIR + 'src/auto-scripts/bash/pcap_to_flows.sh'
BASH_AUTO_FLOWS_TO_CSV = BASE_DIR + 'src/auto-scripts/bash/flows_to_csv.sh'
BASH_AUTO_MERGE_CSVS = BASE_DIR + 'src/auto-scripts/bash/merge_csvs.sh'
FLOWS_DIR_TAG = '-flows'
ALL_CSV_FLOWS_DIR_TAG = '-all-flows-csv'
######################################################
################### >>> Get arguments
######################################################
def get_args(arguments=None):
parser = argparse.ArgumentParser(description='TBD')
parser.add_argument('--pcaps_dir', metavar='<path to directory>', help='Path to directory with only PCAPs containing MUD-rejected traffic.', required=True)
args = parser.parse_args(arguments)
pcaps_dir = args.pcaps_dir
if os.path.isdir(pcaps_dir):
print('>>> Starting pcaps to labelled IEEE-IoT-NIDS csvs generation from directory: {}'.format(pcaps_dir))
return pcaps_dir if pcaps_dir.endswith('/') else pcaps_dir + '/'
else:
raise ValueError('Directory [ {} ] does not seem to exist. Exiting.'.format(pcaps_dir))
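# Hedged usage sketch: get_args() also accepts an explicit argument list, so the
# pipeline can be exercised without a shell (the directory below is hypothetical):
#   pcaps_dir = get_args(['--pcaps_dir', './captures/ezviz-rejected/'])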
############################################################################################################
################### >>> BASH SCRIPT INVOCATIONS
############################################################################################################
def pcaps_to_flows(pcaps_dir):
#subprocess.call(BASH_AUTO_PCAP_TO_FLOWS + " " + pcaps_dir, shell=True)
dir = os.fsencode(pcaps_dir)
for file in os.listdir(dir):
# Gets string of every file name in directory
pcap_file = os.fsdecode(file)
if pcap_file.endswith('.pcap'):
file_dir = os.path.splitext(pcap_file)[0]
file_path = pcaps_dir + pcap_file
output_path = pcaps_dir + file_dir + FLOWS_DIR_TAG + '/'
print('>>> Generating flow directory and files for \n>>> {}'.format(file_path))
# Create output dir if does not exist
Path(output_path).mkdir(parents=True, exist_ok=True)
subprocess.run(['nfpcapd', '-r', file_path, '-l', output_path])
print('>>> Flows generated at\n>>> {}'.format(output_path))
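# nfpcapd converts each raw capture into nfcapd flow files written to the -l
# directory, so every input pcap ends up with its own '<name>-flows/' folder next
# to it; the flow files are typically named nfcapd.<timestamp>, though exact names
# depend on the nfdump version and the capture times (hedged note, not verified here).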
def flows_to_aggregated_csvs(pcaps_dir):
# To then copy all per-pcap flow csv into one directory
pcaps_flows_csvs_dir = pcaps_dir + os.path.basename(os.path.normpath(pcaps_dir)) + ALL_CSV_FLOWS_DIR_TAG
print(pcaps_flows_csvs_dir)
Path(pcaps_flows_csvs_dir).mkdir(parents=True, exist_ok=True)
dir = os.fsencode(pcaps_dir)
for data in os.listdir(dir):
# Gets string of every file name in directory
data_name = os.fsdecode(data)
# Target only folders generated previously. NOTE: Assumes no folder with FLOWS-DIR-TAG is "manually" generated
if data_name.endswith(FLOWS_DIR_TAG):
path_to_flows = pcaps_dir + data_name
print('>>> Generating and aggregating CSV NetFlow files for flows at tmp directory:\n>>> {}'.format(path_to_flows))
subprocess.call(BASH_AUTO_FLOWS_TO_CSV + " " + path_to_flows, shell=True)
subprocess.call(BASH_AUTO_MERGE_CSVS + " " + path_to_flows, shell=True)
merged_csv_name = data_name + '.csv'
merged_csv_path = path_to_flows + '/' + merged_csv_name
#print('MERGED CSV FLOWS PATH : {}'.format(merged_csv_path))
if os.path.isfile(merged_csv_path):
# Add csv to all pcaps-to-csvs folder
subprocess.run(['cp', merged_csv_path, pcaps_flows_csvs_dir + '/' + merged_csv_name])
# Merge all CSV flows into single flow
subprocess.call(BASH_AUTO_MERGE_CSVS + ' ' + pcaps_flows_csvs_dir, shell=True)
print('>>> All per-pcap CSVs have been saved in [ {} ], both separately and in a single CSV.'.format(pcaps_flows_csvs_dir))
aggregated_csvs_filename = os.path.basename(os.path.normpath(pcaps_flows_csvs_dir)) + '.csv'
print('>>> Merged all generated per-pcap CSV NetFlows to single CSV file at:\n>>> {}'.format(aggregated_csvs_filename))
return pcaps_flows_csvs_dir, aggregated_csvs_filename
############################################################################################################
################### >>> CSV METADATA MANIPULATION
############################################################################################################
def change_all_csv_header_to_custom(csvs_dir):
tgt_file_path = csvs_dir + '/' + os.path.basename(os.path.normpath(csvs_dir)) + '.csv'
if not tgt_file_path.endswith('.csv'):
sys.exit(0)
out_path = os.path.splitext(tgt_file_path)[0] + '-custom-fromat.csv'
print(out_path)
with open(tgt_file_path, newline='') as inFile, open(out_path, 'w', newline='') as outfile:
r = csv.reader(inFile)
w = csv.writer(outfile)
new_header = ['ts','te','td','pr','sa','da','sp','dp','sas','pas','ipkt','opkt','ibyt','obyt','flg','dir','bps','pps','bpp','cl','sl','al']
next(r, None) # skip the first row from the reader, the old header
# write new header
w.writerow(new_header)
# copy the rest
for row in r:
w.writerow(row)
return out_path
############################################################################################################
################### >>> CSV VALUES MANIPULATION
############################################################################################################
def to_float(val):
if isinstance(val, float):
if not np.isnan(val):
return float(val)
elif isinstance(val, str):
try:
return float(val)
except Exception as e:
if val.endswith('M'): # Manual 'Million' value from nfdump parsing
num = val.split()[0]
return float(num) * 1000000
return 0
return 0
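# Illustration of to_float() on nfdump-style values (sketch):
#   to_float('1.2 M')        -> 1200000.0   (nfdump's 'Million' shorthand)
#   to_float('42')           -> 42.0
#   to_float(float('nan'))   -> 0           (missing values collapse to zero)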
def to_consistent_float_fields_ieee(df):
""" Used in clean_duplicates"""
df['td'] = df['td'].apply(to_float)
df['sas'] = df['sas'].apply(to_float)
df['pas'] = df['pas'].apply(to_float)
df['ipkt'] = df['ipkt'].apply(to_float)
df['opkt'] = df['opkt'].apply(to_float)
df['ibyt'] = df['ibyt'].apply(to_float)
df['obyt'] = df['obyt'].apply(to_float)
df['bps'] = df['bps'].apply(to_float)
df['pps'] = df['pps'].apply(to_float)
df['bpp'] = df['bpp'].apply(to_float)
df['cl'] = df['cl'].apply(to_float)
df['sl'] = df['sl'].apply(to_float)
df['al'] = df['al'].apply(to_float)
return df
def map_ports(csv_file):
df = pd.read_csv(csv_file)
values = {'sp' : [], 'dp': []}
tot_entries = df.shape[0]
for idx, entry in df.iterrows():
sp = int(entry.sp)
dp = int(entry.dp)
proto = str(entry.pr).lower()
try:
s_serv = socket.getservbyport(sp, proto)
except Exception as e:
s_serv = 'ephemeral'
try:
d_serv = socket.getservbyport(dp, proto)
except Exception as e:
d_serv = 'ephemeral'
#print('{}, {}'.format(s_serv, d_serv))
values['sp'].append(s_serv)
values['dp'].append(d_serv)
df['sp'] = values['sp']
df['dp'] = values['dp']
out_path = os.path.splitext(csv_file)[0] + '-portlabels.csv'
df.to_csv(out_path, sep=',', index=False)
print('OUT PATH: {}'.format(out_path))
return out_path
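# The service names come from the local services database, so results can vary by
# host; typical lookups (sketch): socket.getservbyport(80, 'tcp') -> 'http',
# socket.getservbyport(53, 'udp') -> 'domain'. Unknown port/protocol combinations
# raise OSError and are recorded as 'ephemeral' above.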
def remove_spaces_from_string_cols(df):
"""Utility cleaning"""
def lambda_func(x):
return x.replace(' ', '') if isinstance(x, str) else x
for col in df.columns:
df[col] = df[col].apply(lambda_func)
return df
def add_geo_data_m2(csv_file):
if not os.path.isfile(csv_file):
raise ValueError('\n>>> File \n>>>[ {} ] \n>>>does not seem to exist, or is not a file'.format(csv_file))
df = pd.read_csv(csv_file)
df = remove_spaces_from_string_cols(df)
print(df.head(5))
new_cols = ['sa_country', 'sa_city', 'sa_lat', 'sa_lon', 'sa_org', 'sa_asname', 'da_country', 'da_city', 'da_lat', 'da_lon', 'da_org', 'da_asname']
new_cols_init = ['unresolved', 'unresolved', 0, 0, 'unresolved', 'unresolved', 'unresolved', 'unresolved', 0, 0, 'unresolved', 'unresolved']
df[new_cols] = pd.DataFrame([new_cols_init], index=df.index)
#print(df.head(10))
src_cols = ['sa_country', 'sa_city', 'sa_lat', 'sa_lon', 'sa_org', 'sa_as']
dst_cols = ['da_country', 'da_city', 'da_lat', 'da_lon', 'da_org', 'da_as']
local_vals = ['local', 'local', 0, 0, 'local', 'local',]
unsolved_vals = ['unresolved', 'unresolved', 0, 0, 'unresolved', 'unresolved']
addresses_cache = {}
if os.path.isfile('geo_cache.json'):
with open('geo_cache.json') as json_file:
addresses_cache = json.load(json_file)
try:
for idx, row in df.iterrows():
sa = row['sa']
da = row['da']
############ Source address ############
if not sa in addresses_cache.keys():
try:
if ipaddress.ip_address(sa).is_private:
df.loc[idx, src_cols] = local_vals
addresses_cache[sa] = local_vals
else:
ip_info = requests.get(GEOIP_API_JSON_URL + sa)
xrl = ip_info.headers['X-Rl']
ttl = ip_info.headers['X-Ttl']
ip_info = ip_info.json()
print(ip_info)
if ip_info['status'] == 'success':
vals = [ip_info['country'], ip_info['city'], ip_info['lat'], ip_info['lon'], ip_info['org'], ip_info['as']]
df.loc[idx, src_cols] = vals
addresses_cache[sa] = vals
else:
# Already unresolved
addresses_cache[sa] = unsolved_vals
if int(xrl) <= 1:
secs_wait = int(ttl) + 1
print('>>> API query frequency exceeded. Waiting for {} seconds before resuming queries.'.format(secs_wait))
time.sleep(secs_wait)
except Exception as e:
print('>>> EXCEPTED')
print(e)
df.loc[idx, src_cols] = unsolved_vals
addresses_cache[sa] = unsolved_vals
else:
print('>>> SA CACHED')
df.loc[idx, src_cols] = addresses_cache[sa]
############ Destination address ############
if not da in addresses_cache.keys():
try:
if ipaddress.ip_address(da).is_private:
df.loc[idx, dst_cols] = local_vals
addresses_cache[da] = local_vals
else:
ip_info = requests.get(GEOIP_API_JSON_URL + da)
xrl = ip_info.headers['X-Rl']
ttl = ip_info.headers['X-Ttl']
ip_info = ip_info.json()
print(ip_info)
if ip_info['status'] == 'success':
vals = [ip_info['country'], ip_info['city'], ip_info['lat'], ip_info['lon'], ip_info['org'], ip_info['as']]
df.loc[idx, dst_cols] = vals
addresses_cache[da] = vals
else:
# Already unresolved
addresses_cache[da] = unsolved_vals
if int(xrl) <= 1:
secs_wait = int(ttl) + 1
print('>>> API query frequency exceeded. Waiting for {} seconds before resuming queries.'.format(secs_wait))
time.sleep(secs_wait)
except Exception as e:
print('>>> EXCEPTED')
print(e)
df.loc[idx, dst_cols] = unsolved_vals
addresses_cache[da] = unsolved_vals
else:
print('>>> DA CACHED')
df.loc[idx, dst_cols] = addresses_cache[da]
print(df.iloc[idx][src_cols])
print(df.iloc[idx][dst_cols])
############ For loop end ############
except KeyboardInterrupt:
print('Interrupted')
print('>>> EXCEPTED')
with open('geo_cache.json', 'w') as outfile:
json.dump(addresses_cache, outfile, indent=4)
sys.exit(0)
with open('geo_cache.json', 'w') as outfile:
json.dump(addresses_cache, outfile, indent=4)
df.to_csv('geo_df_csv.csv', sep=',')
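# Note on the rate limiting above (sketch): ip-api's free endpoint reports the
# remaining request budget in the 'X-Rl' header and the seconds until reset in
# 'X-Ttl'; sleeping for X-Ttl + 1 once X-Rl drops to 1, as done here, keeps the loop
# under the per-minute quota, and geo_cache.json memoises already-resolved addresses.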
############################################################################################################
################### >>> CSV LABELLINGS
############################################################################################################
def assign_default_labels(csv_file):
csv_in = pd.read_csv(csv_file)
#print(csv_in.sample(5))
csv_in['MALICIOUS'] = [DEFAULT_MALICIOUSNESS_LABEL] * len(csv_in)
csv_in['ATK_TYPE'] = [DEFUALT_ATK_LABEL] * len(csv_in)
#print(csv_in.sample(5))
print(csv_in.shape)
csv_in = csv_in.sort_values('ts')
print(csv_in.sample(10))
out_path = os.path.splitext(csv_file)[0]+'-READY.csv'
csv_in.to_csv(out_path, sep=',', index = False)
print('OUT PATH: {}'.format(out_path))
return out_path
def assign_ieee_labels(csv_to_label, csv_labelled):
if not os.path.isfile(csv_to_label) or not os.path.isfile(csv_labelled):
raise ValueError('>>> Unable to read file \n[ {} ] \nor [ {} ]. Exiting.'.format(csv_to_label, csv_labelled))
# IEEE IoT NIDS default csv_labelled @
# /Users/lucamrgs/Big_Data/IEEE-Huy-Kang/iot_intrusion_dataset/attacks-all-ezviz/GT-ALL-EZVIZ-LABELLED.csv
pd_csv_to_label = pd.read_csv(csv_to_label)
pd_csv_labelled = pd.read_csv(csv_labelled)
#pd_csv_to_label = to_consistent_float_fields_ieee(pd_csv_to_label)
#pd_csv_labelled = to_consistent_float_fields_ieee(pd_csv_labelled)
idx = ['ts', 'te', 'td', 'sa', 'da', 'sp', 'dp', 'pr', 'flg', 'fwd', 'stos',
'ipkt', 'ibyt', 'opkt', 'obyt', 'in', 'out', 'sas', 'das', 'smk', 'dmk',
'dtos', 'dir', 'nh', 'nhb', 'svln', 'dvln', 'ismc', 'odmc', 'idmc',
'osmc', 'mpls1', 'mpls2', 'mpls3', 'mpls4', 'mpls5', 'mpls6', 'mpls7',
'mpls8', 'mpls9', 'mpls10', 'cl', 'sl', 'al', 'ra', 'eng', 'exid',
'tr']
idx2 = ['ts','te','td','pr','sa','da','sp','dp','sas','pas','ipkt','opkt','ibyt','obyt','flg','dir','bps','pps','bpp','cl','sl','al']
dups_stable_fields = ['ts', 'pr', 'sa', 'da', 'sp', 'dp', 'flg', 'bpp']
bidir_flows_stable_fields = ['ts', 'pr', 'sa', 'da', 'sp', 'dp']
print('############################################')
print('########## ~ MERGING DATAFRAMES ~ ##########')
print('############################################')
print('INITIAL SHAPE')
print(pd_csv_to_label.shape)
#print('INITIAL HEAD')
#print(pd_csv_to_label.head(5))
print(pd_csv_labelled.shape)
pd_csv_to_label.join(pd_csv_labelled.set_index(bidir_flows_stable_fields), on=bidir_flows_stable_fields, lsuffix='_original', rsuffix='_joined')
# @ https://stackoverflow.com/questions/44781633/join-pandas-dataframes-based-on-column-values
df = pd.merge(pd_csv_to_label, pd_csv_labelled, how='left', on=bidir_flows_stable_fields)
print(df.shape)
# Labels in MALICIOUS_y, ATK_TYPE_y
print(df.columns)
# Clean headers
for col in df.columns:
if not (col == 'MALICIOUS_y' or col == 'ATK_TYPE_y') and col.endswith('_y'):
df = df.drop(col, axis=1)
df = df.drop(['MALICIOUS_x', 'ATK_TYPE_x'], axis=1)
df = df.drop([c for c in df.columns if not (col == 'MALICIOUS_y' or col == 'ATK_TYPE_y') and col.endswith('_y')])
new_cols = [c.rsplit('_', 1)[0] for c in df.columns if (c.endswith('_x') or c.endswith('_y')) or c != 'ATK_TYPE_y']
df.columns = new_cols
#print(df.columns)
df['MALICIOUS'] = df['MALICIOUS'].fillna(0)
df['ATK_TYPE'] = df['ATK_TYPE'].fillna('unknown')
df = to_consistent_float_fields_ieee(df)
#print(df.sample(30)['ATK_TYPE'])
print('############################################')
print('######## ~ END MERGING DATAFRAMES ~ ########')
print('############################################')
#out_path = os.path.splitext(csv_to_label)[0]+'-ieee-lbls-FINAL.csv'
df = remove_spaces_from_string_cols(df)
out_path = os.path.splitext(csv_to_label)[0]+'-LABELLED-FINAL.csv'
df.to_csv(out_path, float_format='%.3f', index=False)
return out_path
############################################################################################################
################### >>> CSV CLEANING
############################################################################################################
def remove_meta_rows(csv_file):
if not os.path.isfile(csv_file):
raise ValueError('\n>>> File \n>>>[ {} ] \n>>>does not seem to exist, or is not a file'.format(csv_file))
filename = os.path.basename(os.path.normpath(csv_file))
csv_in = pd.read_csv(csv_file)
#df = csv_in[(csv_in.ts != 'Summary') & (csv_in.ts != 'flows') & (~(csv_in.ts.astype(str).str.isnumeric()))]
df = pd.DataFrame(csv_in)
#print(df)
out_path = os.path.splitext(csv_file)[0]+'-clear.csv'
print('OUT PATH: {}'.format(out_path))
df.to_csv(out_path, index = False)
return out_path
def clean_duplicates(csv_file):
df = pd.read_csv(csv_file)
df = to_consistent_float_fields_ieee(df)
print(df.head(5))
dups_stable_fields = ['ts', 'pr', 'sa', 'da', 'sp', 'dp', 'flg', 'bpp']
df = df.drop_duplicates(dups_stable_fields, keep='last').sort_values('ts')
print(df.head(5))
out_path = os.path.splitext(csv_file)[0]+'-CLN.csv'
df.to_csv(out_path, sep=',', float_format='%.3f', index=False)
print('DF SHAPE AFTER REMOVING DUPS: {}'.format(df.shape))
print(df.sample(10))
return out_path
############################################################################################################
################### >>> DIRECTORY CLEANING
############################################################################################################
def clean_up_flow_folders(pcap_dir, ask=False):
print('>>> Removing all temporary flows folders...')
dir = os.fsencode(pcap_dir)
for data in os.listdir(dir):
data_name = os.fsdecode(data)
if data_name.endswith(FLOWS_DIR_TAG):
path_to_file = pcap_dir + data_name
if ask:
print('>>> REMOVE : {} ?'.format(path_to_file))
resp = input('Type y for Yes, whatever for No\n>>> ')
if resp == 'y':
shutil.rmtree(path_to_file)
#subprocess.run(['rmdir', '-rf', path_to_file], shell=True)
else:
print('>>> File [ {} ] salvaged'.format(path_to_file))
else:
shutil.rmtree(path_to_file)
print('>>> Removed: {}'.format(path_to_file))
if __name__ == '__main__':
"""
dir = get_args()
print(dir)
pcaps_to_flows(dir)
all_csvs_dir, all_csvs_file = flows_to_aggregated_csvs(dir)
print(all_csvs_dir)
all_csvs_file = change_all_csv_header_to_custom(all_csvs_dir)
print(all_csvs_file)
def_labels_csv = assign_default_labels(all_csvs_file)
print(def_labels_csv)
all_csvs_file = clean_duplicates(def_labels_csv)
print('ALL CSVS FILE: {}'.format(all_csvs_file))
final_file = assign_ieee_labels(def_labels_csv, IEEE_EZVIZ_GT_CSV)
print(final_file)
clean_up_flow_folders(dir)
"""
# Tests
#add_geo_data_m2('./outputs/ieee-ezviz-complete/ieee-ezviz-complete-all-flows-bidir-csv-custom-fromat-LABELLED-FINAL.csv')
geo_df = pd.read_csv('geo_df_csv.csv')
from copy import deepcopy
import datetime
import inspect
import pydoc
import numpy as np
import pytest
from pandas.compat import PY37
from pandas.util._test_decorators import async_mark, skip_if_no
import pandas as pd
from pandas import Categorical, DataFrame, Series, compat, date_range, timedelta_range
import pandas._testing as tm
class TestDataFrameMisc:
@pytest.mark.parametrize("attr", ["index", "columns"])
def test_copy_index_name_checking(self, float_frame, attr):
# don't want to be able to modify the index stored elsewhere after
# making a copy
ind = getattr(float_frame, attr)
ind.name = None
cp = float_frame.copy()
getattr(cp, attr).name = "foo"
assert getattr(float_frame, attr).name is None
def test_getitem_pop_assign_name(self, float_frame):
s = float_frame["A"]
assert s.name == "A"
s = float_frame.pop("A")
assert s.name == "A"
s = float_frame.loc[:, "B"]
assert s.name == "B"
s2 = s.loc[:]
assert s2.name == "B"
def test_get_value(self, float_frame):
for idx in float_frame.index:
for col in float_frame.columns:
result = float_frame._get_value(idx, col)
expected = float_frame[col][idx]
tm.assert_almost_equal(result, expected)
def test_add_prefix_suffix(self, float_frame):
with_prefix = float_frame.add_prefix("foo#")
expected = pd.Index([f"foo#{c}" for c in float_frame.columns])
tm.assert_index_equal(with_prefix.columns, expected)
with_suffix = float_frame.add_suffix("#foo")
expected = pd.Index([f"{c}#foo" for c in float_frame.columns])
tm.assert_index_equal(with_suffix.columns, expected)
with_pct_prefix = float_frame.add_prefix("%")
expected = pd.Index([f"%{c}" for c in float_frame.columns])
tm.assert_index_equal(with_pct_prefix.columns, expected)
with_pct_suffix = float_frame.add_suffix("%")
expected = pd.Index([f"{c}%" for c in float_frame.columns])
tm.assert_index_equal(with_pct_suffix.columns, expected)
def test_get_axis(self, float_frame):
f = float_frame
assert f._get_axis_number(0) == 0
assert f._get_axis_number(1) == 1
assert f._get_axis_number("index") == 0
assert f._get_axis_number("rows") == 0
assert f._get_axis_number("columns") == 1
assert f._get_axis_name(0) == "index"
assert f._get_axis_name(1) == "columns"
assert f._get_axis_name("index") == "index"
assert f._get_axis_name("rows") == "index"
assert f._get_axis_name("columns") == "columns"
assert f._get_axis(0) is f.index
assert f._get_axis(1) is f.columns
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(2)
with pytest.raises(ValueError, match="No axis.*foo"):
f._get_axis_name("foo")
with pytest.raises(ValueError, match="No axis.*None"):
f._get_axis_name(None)
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(None)
def test_keys(self, float_frame):
getkeys = float_frame.keys
assert getkeys() is float_frame.columns
def test_column_contains_raises(self, float_frame):
with pytest.raises(TypeError, match="unhashable type: 'Index'"):
float_frame.columns in float_frame
def test_tab_completion(self):
# DataFrame whose columns are identifiers shall have them in __dir__.
df = pd.DataFrame([list("abcd"), list("efgh")], columns=list("ABCD"))
for key in list("ABCD"):
assert key in dir(df)
assert isinstance(df.__getitem__("A"), pd.Series)
# DataFrame whose first-level columns are identifiers shall have
# them in __dir__.
df = pd.DataFrame(
[list("abcd"), list("efgh")],
columns=pd.MultiIndex.from_tuples(list(zip("ABCD", "EFGH"))),
)
for key in list("ABCD"):
assert key in dir(df)
for key in list("EFGH"):
assert key not in dir(df)
assert isinstance(df.__getitem__("A"), pd.DataFrame)
def test_not_hashable(self):
empty_frame = DataFrame()
df = DataFrame([1])
msg = "'DataFrame' objects are mutable, thus they cannot be hashed"
with pytest.raises(TypeError, match=msg):
hash(df)
with pytest.raises(TypeError, match=msg):
hash(empty_frame)
def test_column_name_contains_unicode_surrogate(self):
# GH 25509
colname = "\ud83d"
df = DataFrame({colname: []})
# this should not crash
assert colname not in dir(df)
assert df.columns[0] == colname
def test_new_empty_index(self):
df1 = DataFrame(np.random.randn(0, 3))
df2 = DataFrame(np.random.randn(0, 3))
df1.index.name = "foo"
assert df2.index.name is None
def test_array_interface(self, float_frame):
with np.errstate(all="ignore"):
result = np.sqrt(float_frame)
assert isinstance(result, type(float_frame))
assert result.index is float_frame.index
assert result.columns is float_frame.columns
tm.assert_frame_equal(result, float_frame.apply(np.sqrt))
def test_get_agg_axis(self, float_frame):
cols = float_frame._get_agg_axis(0)
assert cols is float_frame.columns
idx = float_frame._get_agg_axis(1)
assert idx is float_frame.index
msg = r"Axis must be 0 or 1 \(got 2\)"
with pytest.raises(ValueError, match=msg):
float_frame._get_agg_axis(2)
def test_nonzero(self, float_frame, float_string_frame):
empty_frame = DataFrame()
assert empty_frame.empty
assert not float_frame.empty
assert not float_string_frame.empty
# corner case
df = DataFrame({"A": [1.0, 2.0, 3.0], "B": ["a", "b", "c"]}, index=np.arange(3))
del df["A"]
assert not df.empty
def test_iteritems(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
for k, v in df.items():
assert isinstance(v, DataFrame._constructor_sliced)
def test_items(self):
# GH 17213, GH 13918
cols = ["a", "b", "c"]
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=cols)
for c, (k, v) in zip(cols, df.items()):
assert c == k
assert isinstance(v, Series)
assert (df[k] == v).all()
def test_iter(self, float_frame):
assert tm.equalContents(list(float_frame), float_frame.columns)
def test_iterrows(self, float_frame, float_string_frame):
for k, v in float_frame.iterrows():
exp = float_frame.loc[k]
tm.assert_series_equal(v, exp)
for k, v in float_string_frame.iterrows():
exp = float_string_frame.loc[k]
tm.assert_series_equal(v, exp)
def test_iterrows_iso8601(self):
# GH 19671
s = DataFrame(
{
"non_iso8601": ["M1701", "M1802", "M1903", "M2004"],
"iso8601": date_range("2000-01-01", periods=4, freq="M"),
}
)
for k, v in s.iterrows():
exp = s.loc[k]
tm.assert_series_equal(v, exp)
def test_iterrows_corner(self):
# gh-12222
df = DataFrame(
{
"a": [datetime.datetime(2015, 1, 1)],
"b": [None],
"c": [None],
"d": [""],
"e": [[]],
"f": [set()],
"g": [{}],
}
)
expected = Series(
[datetime.datetime(2015, 1, 1), None, None, "", [], set(), {}],
index=list("abcdefg"),
name=0,
dtype="object",
)
_, result = next(df.iterrows())
tm.assert_series_equal(result, expected)
def test_itertuples(self, float_frame):
for i, tup in enumerate(float_frame.itertuples()):
s = DataFrame._constructor_sliced(tup[1:])
s.name = tup[0]
expected = float_frame.iloc[i, :].reset_index(drop=True)
tm.assert_series_equal(s, expected)
df = DataFrame(
{"floats": np.random.randn(5), "ints": range(5)}, columns=["floats", "ints"]
)
for tup in df.itertuples(index=False):
assert isinstance(tup[1], int)
df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
dfaa = df[["a", "a"]]
assert list(dfaa.itertuples()) == [(0, 1, 1), (1, 2, 2), (2, 3, 3)]
# repr with int on 32-bit/windows
if not (compat.is_platform_windows() or compat.is_platform_32bit()):
assert (
repr(list(df.itertuples(name=None)))
== "[(0, 1, 4), (1, 2, 5), (2, 3, 6)]"
)
tup = next(df.itertuples(name="TestName"))
assert tup._fields == ("Index", "a", "b")
assert (tup.Index, tup.a, tup.b) == tup
assert type(tup).__name__ == "TestName"
df.columns = ["def", "return"]
tup2 = next(df.itertuples(name="TestName"))
assert tup2 == (0, 1, 4)
assert tup2._fields == ("Index", "_1", "_2")
df3 = DataFrame({"f" + str(i): [i] for i in range(1024)})
# will raise SyntaxError if trying to create namedtuple
tup3 = next(df3.itertuples())
assert isinstance(tup3, tuple)
if PY37:
assert hasattr(tup3, "_fields")
else:
assert not hasattr(tup3, "_fields")
# GH 28282
df_254_columns = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(254)}])
result_254_columns = next(df_254_columns.itertuples(index=False))
assert isinstance(result_254_columns, tuple)
assert hasattr(result_254_columns, "_fields")
df_255_columns = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(255)}])
result_255_columns = next(df_255_columns.itertuples(index=False))
assert isinstance(result_255_columns, tuple)
# Dataframes with >=255 columns will fallback to regular tuples on python < 3.7
if PY37:
assert hasattr(result_255_columns, "_fields")
else:
assert not hasattr(result_255_columns, "_fields")
def test_sequence_like_with_categorical(self):
# GH 7839
# make sure can iterate
df = DataFrame(
{"id": [1, 2, 3, 4, 5, 6], "raw_grade": ["a", "b", "b", "a", "a", "e"]}
)
df["grade"] = Categorical(df["raw_grade"])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.items():
            str(col)
def test_len(self, float_frame):
assert len(float_frame) == len(float_frame.index)
def test_values_mixed_dtypes(self, float_frame, float_string_frame):
frame = float_frame
arr = frame.values
frame_cols = frame.columns
for i, row in enumerate(arr):
for j, value in enumerate(row):
col = frame_cols[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
arr = float_string_frame[["foo", "A"]].values
assert arr[0, 0] == "bar"
df = DataFrame({"complex": [1j, 2j, 3j], "real": [1, 2, 3]})
arr = df.values
assert arr[0, 0] == 1j
# single block corner case
arr = float_frame[["A", "B"]].values
expected = float_frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(arr, expected)
def test_to_numpy(self):
df = pd.DataFrame({"A": [1, 2], "B": [3, 4.5]})
expected = np.array([[1, 3], [2, 4.5]])
result = df.to_numpy()
tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_dtype(self):
df = pd.DataFrame({"A": [1, 2], "B": [3, 4.5]})
expected = np.array([[1, 3], [2, 4]], dtype="int64")
result = df.to_numpy(dtype="int64")
tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_copy(self):
arr = np.random.randn(4, 3)
df = pd.DataFrame(arr)
assert df.values.base is arr
assert df.to_numpy(copy=False).base is arr
assert df.to_numpy(copy=True).base is not arr
def test_to_numpy_mixed_dtype_to_str(self):
# https://github.com/pandas-dev/pandas/issues/35455
df = pd.DataFrame([[pd.Timestamp("2020-01-01 00:00:00"), 100.0]])
result = df.to_numpy(dtype=str)
expected = np.array([["2020-01-01 00:00:00", "100.0"]], dtype=str)
tm.assert_numpy_array_equal(result, expected)
def test_swapaxes(self):
df = DataFrame(np.random.randn(10, 5))
tm.assert_frame_equal(df.T, df.swapaxes(0, 1))
tm.assert_frame_equal(df.T, df.swapaxes(1, 0))
tm.assert_frame_equal(df, df.swapaxes(0, 0))
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.swapaxes(2, 5)
def test_axis_aliases(self, float_frame):
f = float_frame
# reg name
expected = f.sum(axis=0)
result = f.sum(axis="index")
tm.assert_series_equal(result, expected)
expected = f.sum(axis=1)
result = f.sum(axis="columns")
tm.assert_series_equal(result, expected)
def test_class_axis(self):
# GH 18147
# no exception and no empty docstring
assert pydoc.getdoc(DataFrame.index)
assert pydoc.getdoc(DataFrame.columns)
def test_more_values(self, float_string_frame):
values = float_string_frame.values
assert values.shape[1] == len(float_string_frame.columns)
def test_repr_with_mi_nat(self, float_string_frame):
df = DataFrame(
{"X": [1, 2]}, index=[[pd.NaT, pd.Timestamp("20130101")], ["a", "b"]]
)
result = repr(df)
expected = " X\nNaT a 1\n2013-01-01 b 2"
assert result == expected
def test_items_names(self, float_string_frame):
for k, v in float_string_frame.items():
assert v.name == k
def test_series_put_names(self, float_string_frame):
series = float_string_frame._series
for k, v in series.items():
assert v.name == k
def test_empty_nonzero(self):
df = DataFrame([1, 2, 3])
assert not df.empty
df = DataFrame(index=[1], columns=[1])
assert not df.empty
df = DataFrame(index=["a", "b"], columns=["c", "d"]).dropna()
assert df.empty
assert df.T.empty
empty_frames = [
DataFrame(),
DataFrame(index=[1]),
DataFrame(columns=[1]),
DataFrame({1: []}),
]
for df in empty_frames:
assert df.empty
assert df.T.empty
def test_with_datetimelikes(self):
df = DataFrame(
{
"A": date_range("20130101", periods=10),
"B": timedelta_range("1 day", periods=10),
}
)
t = df.T
result = t.dtypes.value_counts()
expected = Series({np.dtype("object"): 10})
tm.assert_series_equal(result, expected)
def test_values(self, float_frame):
float_frame.values[:, 0] = 5.0
assert (float_frame.values[:, 0] == 5).all()
def test_deepcopy(self, float_frame):
cp = deepcopy(float_frame)
series = cp["A"]
series[:] = 10
for idx, value in series.items():
assert float_frame["A"][idx] != value
def test_inplace_return_self(self):
# GH 1893
data = DataFrame(
{"a": ["foo", "bar", "baz", "qux"], "b": [0, 0, 1, 1], "c": [1, 2, 3, 4]}
)
def _check_f(base, f):
result = f(base)
assert result is None
# -----DataFrame-----
# set_index
f = lambda x: x.set_index("a", inplace=True)
_check_f(data.copy(), f)
# reset_index
f = lambda x: x.reset_index(inplace=True)
_check_f(data.set_index("a"), f)
# drop_duplicates
f = lambda x: x.drop_duplicates(inplace=True)
_check_f(data.copy(), f)
# sort
f = lambda x: x.sort_values("b", inplace=True)
_check_f(data.copy(), f)
# sort_index
f = lambda x: x.sort_index(inplace=True)
_check_f(data.copy(), f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(data.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(data.copy(), f)
# rename
f = lambda x: x.rename({1: "foo"}, inplace=True)
_check_f(data.copy(), f)
# -----Series-----
d = data.copy()["c"]
# reset_index
f = lambda x: x.reset_index(inplace=True, drop=True)
_check_f(data.set_index("a")["c"], f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(d.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(d.copy(), f)
# rename
f = lambda x: x.rename({1: "foo"}, inplace=True)
_check_f(d.copy(), f)
@async_mark()
async def test_tab_complete_warning(self, ip):
# GH 16409
pytest.importorskip("IPython", minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
code = "import pandas as pd; df = pd.DataFrame()"
await ip.run_code(code)
# TODO: remove it when Ipython updates
# GH 33567, jedi version raises Deprecation warning in Ipython
import jedi
if jedi.__version__ < "0.17.0":
warning = tm.assert_produces_warning(None)
else:
warning = tm.assert_produces_warning(
DeprecationWarning, check_stacklevel=False
)
with warning:
with provisionalcompleter("ignore"):
list(ip.Completer.completions("df.", 1))
def test_attrs(self):
df = pd.DataFrame({"A": [2, 3]})
assert df.attrs == {}
df.attrs["version"] = 1
result = df.rename(columns=str)
assert result.attrs == {"version": 1}
def test_cache_on_copy(self):
# GH 31784 _item_cache not cleared on copy causes incorrect reads after updates
df = DataFrame({"a": [1]})
df["x"] = [0]
df["a"]
df.copy()
df["a"].values[0] = -1
tm.assert_frame_equal(df, DataFrame({"a": [-1], "x": [0]}))
df["y"] = [0]
assert df["a"].values[0] == -1
        tm.assert_frame_equal(df, DataFrame({"a": [-1], "x": [0], "y": [0]}))
#%%
"""
# qpcr_sites.py
Manages site information for Wastewater, using a sites info (Excel) file and a sites configuration (YAML) file.
The YAML file specifies the column names in the Excel file to retrieve info from, as well as descriptions of
each site sampling type (eg. rawWW in ODM maps to the description "Influent" (short description) or
"Raw wastewater" (long description)).
## Usage
sites = QPCRSites("sites.yaml", "sites.xlsx")
siteid = sites.get_sample_siteid("o.08.20.21") # eg: "o"
sitetitle = sites.get_sample_site_title("o.08.20.21") # eg: "Ottawa"
"""
from easydict import EasyDict
import pandas as pd
import numpy as np
import yaml
from qpcr_utils import (
rename_columns,
parse_values,
cleanup_file_name,
load_config,
)
import re
class QPCRSites(object):
def __init__(self, config_file, sites_file=None):
super().__init__()
self.config = load_config(config_file)
self.sites_df = None
if sites_file:
xl = pd.ExcelFile(sites_file)
self.sites_df = xl.parse(xl.sheet_names[0])
self.sites_df.columns = [c.strip() for c in self.sites_df.columns]
self.sites_df.dropna(subset=[])
# Clean up column names
all_columns = [c.column for c in self.config.columns.values()]
rename_columns(self.sites_df, all_columns)
# Make values lower case
for c in self.config.columns.values():
if c.get("make_lower", False):
self.sites_df[c.column] = self.sites_df[c.column].str.lower()
elif c.get("make_upper", False):
self.sites_df[c.column] = self.sites_df[c.column].str.upper()
def get_siteid(self, siteid):
"""Get the valid siteid. If siteid is not recognized then None is returned. Site ID aliases will also be mapped to the actual site ID.
"""
return self.resolve_aliases(siteid)
def get_site_title(self, siteid, default=None):
return self.get_site_info(siteid, self.config.columns.site_title.column, default=default)
# def get_siteids_with_shared_parentid(self, siteid):
# """Get all site IDs, as a list, that share the parent of the specified siteid.
# """
# parentid = self.get_site_parentid(siteid)
# return self.get_siteids_in_parentid(parentid)
def get_siteids_in_parentid(self, parentid):
"""Get all site IDs (including aliases) that are a member of the specified parent ID.
"""
if isinstance(parentid, (pd.Series, pd.DataFrame)):
return self.run_map(parentid, self.get_siteids_in_parentid)
if not parentid:
return []
filt = self.sites_df[self.config.columns.parentid.column].str.lower() == parentid.strip().lower()
siteids = list(self.sites_df[filt][self.config.columns.siteid.column].unique())
siteids = [s for s in siteids if s and not pd.isna(s)]
aliasids = list(self.sites_df[filt][self.config.columns.siteid_aliases.column].unique())
aliasids = [s for s in aliasids if s and not pd.isna(s)]
siteids = list(dict.fromkeys(siteids + aliasids))
return siteids
def run_map(self, df, func, *args, **kwargs):
"""Call the function on each cell in the df (either a pd.Series or a pd.DataFrame). The parameters passed to the function are the cell contents
        followed by args and kwargs. This is equivalent to calling map (for a pd.Series) or applymap (for a pd.DataFrame). The call is not
        in-place; the modified pd.Series or pd.DataFrame is returned.
"""
if isinstance(df, pd.Series):
return df.map(lambda x: func(x, *args, **kwargs))
elif isinstance(df, pd.DataFrame):
return df.applymap(lambda x: func(x, *args, **kwargs))
return df
def get_site_siteid_aliases(self, siteid):
"""Get all aliases for the specified site ID. Aliases are IDs that are commonly used but that should be mapped to a standard ID
eg. "gat" might be an alias for the proper site ID "g".
"""
if isinstance(siteid, (pd.Series, pd.DataFrame)):
return self.run_map(siteid, self.get_site_siteid_aliases)
aliases = self.get_site_info(siteid, self.config.columns.siteid_aliases.column)
return [a.strip() for a in aliases.split(",")]
def get_site_parentid(self, siteid, default=None):
return self.get_site_info(siteid, self.config.columns.parentid.column, default=default)
def get_site_parent_title(self, siteid, default=None):
return self.get_site_info(siteid, self.config.columns.parent_title.column, default=default)
def get_site_sample_type(self, siteid, default=None):
return self.get_site_info(siteid, self.config.columns.sample_type.column, default=default)
def get_site_file_id(self, siteid, default=None):
return self.get_site_info(siteid, self.config.columns.fileid.column, default=default)
def get_sample_siteid(self, sample_id, default=None):
return self.get_site_info(self.get_siteid_from_sampleid(sample_id), self.config.columns.siteid.column, default=default)
def get_sample_site_title(self, sample_id, default=None):
return self.get_site_info(self.get_siteid_from_sampleid(sample_id), self.config.columns.site_title.column, default=default)
def get_sample_siteid_aliases(self, sample_id):
if isinstance(sample_id, (pd.Series, pd.DataFrame)):
return self.run_map(sample_id, self.get_sample_siteid_aliases)
aliases = self.get_site_info(self.get_siteid_from_sampleid(sample_id), self.config.columns.siteid_aliases.column)
        if pd.isna(aliases):
            return []
        return [a.strip() for a in aliases.split(",")]
import unittest
import unittest.mock as mock
import asyncio
import warnings
from fplpandas import FPLPandas
import logging as log
import pandas as pd
from pandas.util.testing import assert_frame_equal
log.basicConfig(level=log.INFO, format='%(message)s')
class TestFplPandas(unittest.TestCase):
def test_get_teams(self):
test_data = [{'id': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'id': 2, 'attr1': 'value21', 'attr2': 'value22'}]
fpl_mock = mock.MagicMock()
async def mock_get_team(team_ids, return_json):
self.assertEqual(team_ids, None)
self.assertEqual(return_json, True)
return test_data
fpl_mock.get_teams = mock_get_team
fpl = FPLPandas(fpl=fpl_mock)
actual_df = fpl.get_teams()
expected_df = pd.DataFrame.from_dict(test_data).set_index('id')
self.assertTrue(expected_df.equals(actual_df))
def test_get_teams_with_ids(self):
test_data = [{'id': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'id': 2, 'attr1': 'value21', 'attr2': 'value22'}]
fpl_mock = mock.MagicMock()
async def mock_get_team(team_ids, return_json):
self.assertEqual(team_ids, [1, 2])
self.assertEqual(return_json, True)
return test_data
fpl_mock.get_teams = mock_get_team
fpl = FPLPandas(fpl=fpl_mock)
actual_df = fpl.get_teams([1, 2])
expected_df = pd.DataFrame.from_dict(test_data).set_index('id')
self.assertTrue(expected_df.equals(actual_df))
def test_get_game_weeks(self):
test_data = [{'id': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'id': 2, 'attr1': 'value21', 'attr2': 'value22'}]
fpl_mock = mock.MagicMock()
async def mock_game_weeks(game_week_ids, return_json):
self.assertEqual(game_week_ids, None)
self.assertEqual(return_json, True)
return test_data
fpl_mock.get_gameweeks = mock_game_weeks
fpl = FPLPandas(fpl=fpl_mock)
actual_df = fpl.get_game_weeks()
expected_df = pd.DataFrame.from_dict(test_data).set_index('id')
self.assertTrue(expected_df.equals(actual_df))
def test_get_game_weeks_with_ids(self):
test_data = [{'id': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'id': 2, 'attr1': 'value21', 'attr2': 'value22'}]
fpl_mock = mock.MagicMock()
async def mock_get_game_weeks(game_week_ids, return_json):
self.assertEqual(game_week_ids, [1, 2])
self.assertEqual(return_json, True)
return test_data
fpl_mock.get_gameweeks = mock_get_game_weeks
fpl = FPLPandas(fpl=fpl_mock)
actual_df = fpl.get_game_weeks([1, 2])
expected_df = pd.DataFrame.from_dict(test_data).set_index('id')
self.assertTrue(expected_df.equals(actual_df))
def test_get_fixtures(self):
test_data = [{'id': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'id': 2, 'attr1': 'value21', 'attr2': 'value22'}]
fpl_mock = mock.MagicMock()
@asyncio.coroutine
def mock_get_fixtures(return_json):
self.assertEqual(return_json, True)
return test_data
fpl_mock.get_fixtures = mock_get_fixtures
fpl = FPLPandas(fpl=fpl_mock)
actual_df = fpl.get_fixtures()
expected_df = pd.DataFrame.from_dict(test_data).set_index('id')
self.assertTrue(expected_df.equals(actual_df))
def test_get_player(self):
test_data = {'id': 1, 'attr1': 'value11', 'attr2': 'value12',
'history_past': [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12'},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22'}],
'history': [{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22'}],
'fixtures': [{'event': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22'}]
}
expected_history_past = [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1}]
expected_history = [{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1}]
expected_fixtures = [{'event': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1}]
expected_player_df = pd.DataFrame.from_records([test_data], index=['id']).rename(index={'id': 'player_id'})
expected_history_past_df = pd.DataFrame.from_dict(expected_history_past).set_index(['player_id', 'season_name'])
expected_history_df = pd.DataFrame.from_dict(expected_history).set_index(['player_id', 'fixture'])
expected_fixtures_df = pd.DataFrame.from_dict(expected_fixtures).set_index(['player_id', 'event'])
fpl_mock = mock.MagicMock()
async def mock_get_player(player_id, players, include_summary, return_json):
self.assertEqual(player_id, 1)
self.assertEqual(players, None)
self.assertEqual(include_summary, True)
self.assertEqual(return_json, True)
return test_data
fpl_mock.get_player = mock_get_player
fpl = FPLPandas(fpl=fpl_mock)
actual_player_df, actual_history_past_df, actual_history_df, actual_fixture_df = fpl.get_player(1)
assert_frame_equal(expected_player_df, actual_player_df)
assert_frame_equal(expected_history_past_df, actual_history_past_df)
assert_frame_equal(expected_history_df, actual_history_df)
assert_frame_equal(expected_fixtures_df, actual_fixture_df)
def test_get_player_with_no_history(self):
test_data = {'id': 1, 'attr1': 'value11', 'attr2': 'value12',
'history_past': [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12'},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22'}],
'history': [],
'fixtures': [{'event': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22'}]
}
expected_history_past = [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1}]
expected_fixtures = [{'event': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1}]
expected_player_df = pd.DataFrame.from_records([test_data], index=['id']).rename(index={'id': 'player_id'})
expected_history_past_df = pd.DataFrame.from_dict(expected_history_past).set_index(['player_id', 'season_name'])
expected_history_df = pd.DataFrame(columns=['player_id', 'fixture']).set_index(['player_id', 'fixture'])
expected_fixtures_df = pd.DataFrame.from_dict(expected_fixtures).set_index(['player_id', 'event'])
fpl_mock = mock.MagicMock()
async def mock_get_player(player_id, players, include_summary, return_json):
self.assertEqual(player_id, 1)
self.assertEqual(players, None)
self.assertEqual(include_summary, True)
self.assertEqual(return_json, True)
return test_data
fpl_mock.get_player = mock_get_player
fpl = FPLPandas(fpl=fpl_mock)
actual_player_df, actual_history_past_df, actual_history_df, actual_fixture_df = fpl.get_player(1)
assert_frame_equal(expected_player_df, actual_player_df)
assert_frame_equal(expected_history_past_df, actual_history_past_df)
assert_frame_equal(expected_history_df, actual_history_df, check_index_type=False)
assert_frame_equal(expected_fixtures_df, actual_fixture_df)
def test_get_players_all(self):
test_data = [{'id': 1, 'attr1': 'value11', 'attr2': 'value12',
'history_past': [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12'},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22'}],
'history': [{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22'}],
'fixtures': [{'event': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22'}]
},
{'id': 2, 'attr1': 'value21', 'attr2': 'value22',
'history_past': [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12'},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22'}],
'history': [{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22'}],
'fixtures': [{'event': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22'}]
}]
expected_players= [{'id': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'id': 2, 'attr1': 'value21', 'attr2': 'value22'}]
expected_history_past = [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1},
{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12', 'player_id': 2},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22', 'player_id': 2}]
expected_history = [{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1},
{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 2},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 2}]
expected_fixtures= [{'event': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1},
{'event': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 2},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 2}]
expected_players_df = pd.DataFrame.from_dict(expected_players).set_index('id').rename(index={'id': 'player_id'})
expected_history_past_df = pd.DataFrame.from_dict(expected_history_past).set_index(['player_id', 'season_name'])
expected_history_df = pd.DataFrame.from_dict(expected_history).set_index(['player_id', 'fixture'])
expected_fixtures_df = pd.DataFrame.from_dict(expected_fixtures).set_index(['player_id', 'event'])
fpl_mock = mock.MagicMock()
async def mock_get_players(player_ids, include_summary, return_json):
self.assertIsNone(player_ids)
self.assertEqual(include_summary, True)
self.assertEqual(return_json, True)
return test_data
fpl_mock.get_players = mock_get_players
fpl = FPLPandas(fpl=fpl_mock)
actual_players_df, actual_history_past_df, actual_history_df, actual_fixture_df = fpl.get_players()
assert_frame_equal(expected_players_df, actual_players_df)
assert_frame_equal(expected_history_past_df, actual_history_past_df)
assert_frame_equal(expected_history_df, actual_history_df)
assert_frame_equal(expected_fixtures_df, actual_fixture_df)
def test_get_players_with_ids(self):
test_data = [{'id': 1, 'attr1': 'value11', 'attr2': 'value12',
'history_past': [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12'},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22'}],
'history': [{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22'}],
'fixtures': [{'event': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22'}]
},
{'id': 2, 'attr1': 'value21', 'attr2': 'value22',
'history_past': [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12'},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22'}],
'history': [{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22'}],
'fixtures': [{'event': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22'}]
}]
expected_players= [{'id': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'id': 2, 'attr1': 'value21', 'attr2': 'value22'}]
expected_history_past = [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1},
{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12', 'player_id': 2},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22', 'player_id': 2}]
expected_history = [{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1},
{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 2},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 2}]
expected_fixtures= [{'event': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1},
{'event': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 2},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 2}]
expected_players_df = pd.DataFrame.from_dict(expected_players).set_index('id').rename(index={'id': 'player_id'})
expected_history_past_df = pd.DataFrame.from_dict(expected_history_past).set_index(['player_id', 'season_name'])
expected_history_df = pd.DataFrame.from_dict(expected_history).set_index(['player_id', 'fixture'])
expected_fixtures_df = pd.DataFrame.from_dict(expected_fixtures).set_index(['player_id', 'event'])
fpl_mock = mock.MagicMock()
async def mock_get_players(player_ids, include_summary, return_json):
self.assertIsNotNone(player_ids)
self.assertEqual(player_ids, [1, 2])
self.assertEqual(include_summary, True)
self.assertEqual(return_json, True)
return test_data
fpl_mock.get_players = mock_get_players
fpl = FPLPandas(fpl=fpl_mock)
actual_players_df, actual_history_past_df, actual_history_df, actual_fixture_df = fpl.get_players([1, 2])
assert_frame_equal(expected_players_df, actual_players_df)
assert_frame_equal(expected_history_past_df, actual_history_past_df)
assert_frame_equal(expected_history_df, actual_history_df)
assert_frame_equal(expected_fixtures_df, actual_fixture_df)
def test_get_user_team_with_user(self):
test_data = {'picks': [{'element': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'element': 2, 'attr1': 'value21', 'attr2': 'value22'}],
'chips': [{'attr1': 'value11', 'attr2': 'value12'}],
'transfers': {'attr1': 'value11', 'attr2': 'value12'}}
expected_picks_df = pd.DataFrame.from_dict(test_data['picks']).set_index('element').rename(index={'element': 'player_id'})
expected_chips_df = pd.DataFrame.from_dict(test_data['chips'])
expected_transfers_df = pd.DataFrame.from_dict([test_data['transfers']])
fpl_mock = mock.MagicMock()
async def mock_login(email, password):
self.assertEqual(email, 'email')
self.assertEqual(password, 'password')
async def mock_get_user_info():
return {'player': {'entry': '123'}}
async def mock_get_user_team(user_id):
self.assertEqual(user_id, '123')
return test_data
fpl_mock.get_user_team = mock_get_user_team
fpl_mock.get_user_info = mock_get_user_info
fpl_mock.login = mock_login
fpl = FPLPandas('email', 'password', fpl=fpl_mock)
actual_picks_df, actual_chips_df, actual_transfers_df = fpl.get_user_team()
assert_frame_equal(expected_picks_df, actual_picks_df)
assert_frame_equal(expected_chips_df, actual_chips_df)
assert_frame_equal(expected_transfers_df, actual_transfers_df)
def test_get_user_team_with_user_id(self):
test_data = {'picks': [{'element': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'element': 2, 'attr1': 'value21', 'attr2': 'value22'}],
'chips': [{'attr1': 'value11', 'attr2': 'value12'}],
'transfers': {'attr1': 'value11', 'attr2': 'value12'}}
expected_picks_df = pd.DataFrame.from_dict(test_data['picks']).set_index('element').rename(index={'element': 'player_id'})
expected_chips_df = pd.DataFrame.from_dict(test_data['chips'])
expected_transfers_df = pd.DataFrame.from_dict([test_data['transfers']])
fpl_mock = mock.MagicMock()
async def mock_login(email, password):
self.assertEqual(email, 'email')
self.assertEqual(password, 'password')
async def mock_get_user_team(user_id):
self.assertEqual(user_id, 456)
return test_data
fpl_mock.get_user_team = mock_get_user_team
fpl_mock.login = mock_login
fpl = FPLPandas('email', 'password', fpl=fpl_mock)
actual_picks_df, actual_chips_df, actual_transfers_df = fpl.get_user_team(456)
assert_frame_equal(expected_picks_df, actual_picks_df)
assert_frame_equal(expected_chips_df, actual_chips_df)
assert_frame_equal(expected_transfers_df, actual_transfers_df)
def test_get_user_team_no_email(self):
fpl_mock = mock.MagicMock()
fpl = FPLPandas(None, 'password', fpl=fpl_mock)
with self.assertRaisesRegex(ValueError, 'email'):
fpl.get_user_team()
def test_get_user_team_no_password(self):
fpl_mock = mock.MagicMock()
fpl = FPLPandas('email', None, fpl=fpl_mock)
with self.assertRaisesRegex(ValueError, 'password'):
fpl.get_user_team()
def test_get_user_info(self):
test_data = {'player': {'entry': '123'}}
expected_df = pd.DataFrame.from_dict([test_data['player']])
fpl_mock = mock.MagicMock()
async def mock_login(email, password):
self.assertEqual(email, 'email')
self.assertEqual(password, 'password')
async def mock_get_user_info():
return {'player': {'entry': '123'}}
fpl_mock.get_user_info = mock_get_user_info
fpl_mock.login = mock_login
fpl = FPLPandas('email', 'password', fpl=fpl_mock)
actual_df = fpl.get_user_info()
        assert_frame_equal(expected_df, actual_df)
# Written by i3s
import os
import numpy as np
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import seaborn as sns
import time
from sklearn.model_selection import KFold
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.cross_decomposition import PLSRegression
from sklearn.linear_model import LogisticRegression, Lasso
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
def proj_l1ball(y, eta):
"""
    Projection of y onto the l1 ball of radius eta.
    Note that y should preferably be 1-D; otherwise the result may be unpredictable.
    This function automatically reshapes y to (m,), where m is y.size (i.e. y.shape[0]*y.shape[1] for a 2-D input).
    A small usage sketch follows this function.
"""
if type(y) is not np.ndarray:
y = np.array(y)
if y.ndim > 1:
y = np.reshape(y, (-1,))
return np.maximum(
np.absolute(y)
- np.amax(
[
np.amax(
(np.cumsum(np.sort(np.absolute(y), axis=0)[::-1], axis=0) - eta)
/ (np.arange(y.shape[0]) + 1)
),
0,
]
),
0,
) * np.sign(y)
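# --- Editor's illustrative sketch (not part of the original pipeline) ---
# A minimal sanity check for proj_l1ball: project a random vector onto the
# l1 ball of radius eta and verify that the result stays inside the ball.
# The helper name `_demo_proj_l1ball` and the toy data are illustrative only.
def _demo_proj_l1ball():
    import numpy as np

    rng = np.random.RandomState(0)
    y = rng.randn(10)  # arbitrary input vector
    eta = 1.0          # radius of the l1 ball
    w = proj_l1ball(y, eta)
    # The projection lies inside (or on the boundary of) the l1 ball.
    assert np.sum(np.abs(w)) <= eta + 1e-10
    return w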
def centroids(XW, Y, k):
Y = np.reshape(Y, -1)
d = XW.shape[1]
mu = np.zeros((k, d))
"""
    Since Python indexing starts at 0 (not 1, as in Matlab), the test Y == i
    becomes Y == (i + 1); the labels in Y are therefore assumed to be 1..k.
"""
for i in range(k):
C = XW[Y == (i + 1), :]
mu[i, :] = np.mean(C, axis=0)
return mu
def class2indicator(y, k):
if len(y.shape) > 1:
# Either throw exception or transform y, here the latter is chosen.
# Note that a list object has no attribute 'flatten()' as np.array do,
# We use x = np.reshape(y,-1) instead of x = y.flatten() in case of
# the type of 'list' of argument y
y = np.reshape(y, -1)
n = len(y)
Y = np.zeros((n, k)) # dtype=float by default
"""
    Since Python indexing starts at 0 (not 1, as in Matlab), the test y == i
    becomes y == (i + 1); the labels in y are assumed to be 1..k.
"""
for i in range(k):
Y[:, i] = y == (i + 1)
return Y
def nb_Genes(w):
# Return the number of selected genes from the matrix (numpy.ndarray) w
d = w.shape[0]
ind_genes = np.zeros((d, 1))
for i in range(d):
if np.linalg.norm(w[i, :]) > 0:
ind_genes[i] = 1
indGene_w = np.where(ind_genes == 1)[0]
nbG = int(np.sum(ind_genes))
return nbG, indGene_w
def select_feature_w(w, featurenames):
k = w.shape[1]
d = w.shape[0]
lst_features = []
lst_norm = []
for i in range(k):
s_tmp = w[:, i] # the i-th column
f_tmp = np.abs(s_tmp) # the absolute values of this column
ind = np.argsort(f_tmp)[
::-1
] # the indices of the sorted abs column (descending order)
f_tmp = np.sort(f_tmp)[::-1] # the sorted abs column (descending order)
nonzero_inds = np.nonzero(f_tmp)[0] # the nonzero indices
lst_f = []
lst_n = []
if len(nonzero_inds) > 0:
nozero_ind = nonzero_inds[-1] # choose the last nonzero index
if nozero_ind == 0:
lst_f.append(featurenames[ind[0]])
lst_n.append(s_tmp[ind[0]])
else:
for j in range(nozero_ind + 1):
lst_f.append(featurenames[ind[j]])
lst_n = s_tmp[ind[0 : (nozero_ind + 1)]]
lst_features.append(lst_f)
lst_norm.append(lst_n)
n_cols_f = len(lst_features)
    n_rows_f = max(map(len, lst_features))  # maximum subset length
n_cols_n = len(lst_norm)
n_rows_n = max(map(len, lst_norm))
for i in range(n_cols_f):
ft = np.array(lst_features[i])
ft.resize(n_rows_f, refcheck=False)
nt = np.array(lst_norm[i])
nt.resize(n_rows_n, refcheck=False)
if i == 0:
features = ft
normW = nt
continue
features = np.vstack((features, ft))
normW = np.vstack((normW, nt))
features = features.T
normW = normW.T
return features, normW
def compute_accuracy(idxR, idx, k):
"""
# ===============================
#----- INPUT
# idxR : real labels
# idx : estimated labels
# k : number of class
#----- OUTPUT
# ACC_glob : global accuracy
# tab_acc : accuracy per class
# ===============================
"""
    # Note that Python's built-in sum works better on lists, while numpy.sum
    # works better on numpy arrays, so numpy.ndarray is used as the common
    # type for idxR and idx here.
    if not isinstance(idxR, np.ndarray):
        idxR = np.array(idxR)
    if not isinstance(idx, np.ndarray):
        idx = np.array(idx)
if idxR.ndim == 2 and 1 not in idxR.shape:
idxR = np.reshape(idxR, (-1, 1))
if idx.ndim == 1:
idx = np.reshape(idx, idxR.shape)
# Global accuracy
y = np.sum(idxR == idx)
ACC_glob = y / len(idxR)
# Accuracy per class
tab_acc = np.zeros((1, k))
"""
    Since Python indexing starts at 0 (not 1, as in Matlab), the test
    idx(ind) == j becomes idx[ind] == (j + 1); labels are assumed to be 1..k.
    A small usage sketch follows this function.
"""
for j in range(k):
ind = np.where(idxR == (j + 1))[0]
if len(ind) == 0:
tab_acc[0, j] = 0.0
else:
tab_acc[0, j] = int(np.sum(idx[ind] == (j + 1))) / len(ind)
return ACC_glob, tab_acc
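# --- Editor's illustrative sketch (not part of the original pipeline) ---
# A tiny example of compute_accuracy with 1-based labels and k = 2 classes;
# the helper name `_demo_compute_accuracy` and the toy labels are illustrative.
def _demo_compute_accuracy():
    import numpy as np

    idxR = np.array([1, 1, 2, 2, 2])  # ground-truth labels
    idx = np.array([1, 2, 2, 2, 1])   # predicted labels
    acc_glob, tab_acc = compute_accuracy(idxR, idx, 2)
    # 3 of 5 labels match; class 1 is 1/2 correct, class 2 is 2/3 correct.
    return acc_glob, tab_acc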
def predict_L1(Xtest, W, mu):
# Chambolle_Predict
k = mu.shape[0]
m = Xtest.shape[0]
Ytest = np.zeros((m, 1))
for i in range(m):
distmu = np.zeros((1, k))
XWi = np.matmul(Xtest[i, :], W)
for j in range(k):
distmu[0, j] = np.linalg.norm(XWi - mu[j, :], 1)
# print(distmu)
# sns.kdeplot(np.array(distmu), shade=True, bw=0.1)
Ytest[i] = np.argmin(distmu) + 1 # Since in Python the index starts from 0
return Ytest
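# --- Editor's illustrative sketch (not part of the original pipeline) ---
# Nearest-centroid prediction with the l1 distance on two well-separated 2-D
# classes; the identity projection W and the toy data are illustrative only.
def _demo_predict_L1():
    import numpy as np

    X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 4.9]])
    Y = np.array([1, 1, 2, 2])      # 1-based labels
    W = np.eye(2)                   # identity projection for the sketch
    mu = centroids(np.matmul(X, W), Y, 2)
    Ytest = predict_L1(X, W, mu)    # expected: [[1], [1], [2], [2]]
    return Ytest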
# function to compute the \rho value
def predict_L1_molecule(Xtest, W, mu):
# Chambolle_Predict
k = mu.shape[0]
m = Xtest.shape[0]
Ytest = np.zeros((m, 1))
confidence = np.zeros((m, 1))
for i in range(m):
distmu = np.zeros((1, k))
XWi = np.matmul(Xtest[i, :], W)
for j in range(k):
distmu[0, j] = np.linalg.norm(XWi - mu[j, :], 1)
Ytest[i] = np.argmin(distmu) + 1 # Since in Python the index starts from 0
confidence[i] = (distmu[0, 1] - distmu[0, 0]) / (distmu[0, 1] + distmu[0, 0])
return Ytest, confidence
# =============================Plot functions=================================================
# function to plot the distribution of \rho
def rhoHist(rho, n_equal_bins):
"""
# ===============================
#----- INPUT
# rho : df_confidence
# n_equal_bins : the number of histogram bins
#
#----- OUTPUT
# plt.show()
# ===============================
"""
# The leftmost and rightmost bin edges
first_edge, last_edge = rho.min(), rho.max()
bin_edges = np.linspace(
start=first_edge, stop=last_edge, num=n_equal_bins + 1, endpoint=True
)
_ = plt.hist(rho, bins=bin_edges)
plt.title("Histogram of confidence score")
plt.show()
def pd_plot(X, Yr, W, flag=None):
plt.figure()
X_transform = np.dot(X, W)
# cluster 1
index1 = np.where(Yr == 1)
X_1 = X_transform[index1[0], :]
c1 = np.mean(X_1, axis=0)
# plt.scatter(X_1[:,0],X_1[:,8],c='b', label='cluster1')
# cluster 2
index2 = np.where(Yr == 2)
X_2 = X_transform[index2[0], :]
c2 = np.mean(X_2, axis=0)
if flag == True:
plt.scatter(c1[0], c1[1], c="y", s=100, marker="*", label="center1")
plt.scatter(c2[0], c2[1], c="c", s=100, marker="*", label="center2")
plt.plot(X_1[:, 0], X_1[:, 1], "ob", label="cluster1")
plt.plot(X_2[:, 0], X_2[:, 1], "^r", label="cluster2")
plt.title("Primal_Dual")
plt.legend()
plt.show()
def pca_plot(X, Yr, W, flag=None):
plt.figure()
# if flag==True:
# X=np.dot(X,W)
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)
X_norm = X_pca
# cluster 1
index1 = np.where(Yr == 1)
X_1 = X_norm[index1[0], :]
c1 = np.mean(X_1, axis=0)
# plt.scatter(X_1[:,0],X_1[:,8],c='b', label='cluster1')
# cluster 2
index2 = np.where(Yr == 2)
X_2 = X_norm[index2[0], :]
c2 = np.mean(X_2, axis=0)
# plt.scatter(X_2[:,0],X_2[:,8],c='g',label='cluster2')
if flag == True:
plt.scatter(c1[0], c1[1], c="y", s=100, marker="*", label="center1")
plt.scatter(c2[0], c2[1], c="c", s=100, marker="*", label="center2")
plt.plot(X_1[:, 0], X_1[:, 1], "ob", label="cluster1")
plt.plot(X_2[:, 0], X_2[:, 1], "^r", label="cluster2")
plt.title("PCA")
plt.legend()
plt.show()
def Predrejection(df_confidence, eps, num_eps):
"""
# =====================================================================
# It calculates the false rate according to the value of epsilon
#
#----- INPUT
# df_confidence : dataframe which contains predicted label,
# original label and rho
# eps : the threshold
# num_eps : the number of epsilon that can be tested
#----- OUTPUT
# FalseRate : An array that contains the falserate according to epsilon
# =====================================================================
"""
Yr = np.array(df_confidence["Yoriginal"])
Yr[np.where(Yr == 2)] = -1
Ypre = np.array(df_confidence["Ypred"])
Ypre[np.where(Ypre == 2)] = -1
rho = df_confidence["rho"]
epsList = np.arange(0, eps, eps / num_eps)
falseRate = []
rejectSample = []
for epsilon in epsList:
index = np.where((-epsilon < rho) & (rho < epsilon))
Yr[index] = 0
Ypre[index] = 0
Ydiff = Yr - Ypre
rejectRate = len(index[0]) / len(Yr)
error = len(np.where(Ydiff != 0)[0]) / len(Yr)
falseRate.append(error)
rejectSample.append(rejectRate)
plt.figure()
plt.plot(epsList, falseRate)
plt.xlabel("Confidence score prediction")
plt.ylabel("FN+FP (ratio)")
# plot the number of rejected samples
plt.figure()
plt.plot(epsList, rejectSample)
plt.xlabel("Confidence score prediction")
plt.ylabel(" Reject samples (ratio) ")
return np.array(falseRate)
# ==============================================================================
def predict_FISTA(Xtest, W, mu):
# Chambolle_Predict
k = mu.shape[0]
m = Xtest.shape[0]
Ytest = np.zeros((m, 1))
for i in range(m):
distmu = np.zeros((1, k))
XWi = np.matmul(Xtest[i, :], W)
for j in range(k):
distmu[0, j] = np.linalg.norm(XWi - mu[j, :], 2)
Ytest[i] = np.argmin(distmu) + 1 # Since in Python the index starts from 0
return Ytest
def normest(X, tol=1.0e-6, maxiter=100):
# import necessary modules
import scipy.sparse
import numpy as np
import warnings
if scipy.sparse.issparse(X):
x = np.array(np.sum(np.abs(X), axis=0))
x = np.reshape(x, max(x.shape))
elif type(X) == np.matrix:
x = np.sum(np.abs(np.asarray(X)), axis=0)
x = np.reshape(x, max(x.shape))
else:
x = np.sum(np.abs(X), axis=0)
norm_e = np.linalg.norm(x)
if norm_e == 0:
return norm_e
x = x / norm_e
norm_e0 = 0
count = 0
while np.abs(norm_e - norm_e0) > tol * norm_e:
norm_e0 = norm_e
Xx = np.matmul(X, x)
if np.count_nonzero(Xx) == 0:
Xx = np.random.rand(Xx.shape[0])
x = np.matmul(X.T, Xx)
normx = np.linalg.norm(x)
norm_e = normx / np.linalg.norm(Xx)
x = x / normx
count += 1
if count > maxiter:
warnings.warn(
"Normest::NotConverge:the number of iterations exceeds {} times.\nThe error is {}, the tolerance is {}".format(
maxiter, np.abs(norm_e - norm_e0), tol
),
RuntimeWarning,
)
break
return norm_e
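# --- Editor's illustrative sketch (not part of the original pipeline) ---
# normest estimates the spectral (2-) norm by power iteration; this sketch
# compares it with numpy's exact value on a random matrix. The helper name
# and the random data are illustrative only.
def _demo_normest():
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(50, 20)
    approx = normest(X, tol=1.0e-8, maxiter=500)
    exact = np.linalg.norm(X, 2)
    # The two values should agree closely (up to the tolerance).
    return approx, exact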
def merge_topGene_norm(topGenes, normW, clusternames):
"""
# =====================================================================
    # It merges the two outputs of select_feature_w into a new
    # pandas.DataFrame whose top-level columns are the elements of clusternames,
    # each with two subcolumns: topGenes and Weights.
    # A small usage sketch follows this function.
#
#----- INPUT
# topGenes : ndarray of top Genes chosen by select_features_w
# normW : normWeight of each genes given by select_features_w
# clusternames : A list of the names of each class.
#----- OUTPUT
# df_res : A DataFrame with each colum the first subcolumn the genes
# and second subcolumn their norm of weight
# =====================================================================
"""
if topGenes.shape != normW.shape:
raise ValueError("The dimension of the two input should be the same")
m, n = topGenes.shape
nbC = len(clusternames)
res = np.dstack((topGenes, normW))
res = res.reshape(m, 2 * n)
lst_col = []
for i in range(nbC):
lst_col.append((clusternames[i], "topGenes"))
lst_col.append((clusternames[i], "Weights"))
df_res = pd.DataFrame(res, columns=lst_col)
df_res.columns = pd.MultiIndex.from_tuples(
df_res.columns, names=["CluserNames", "Attributes"]
)
return df_res
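# --- Editor's illustrative sketch (not part of the original pipeline) ---
# Builds the two-level (class, topGenes/Weights) DataFrame from toy inputs;
# the gene names and weights below are made up for illustration.
def _demo_merge_topGene_norm():
    import numpy as np

    topGenes = np.array([["g1", "g4"], ["g2", "g5"], ["g3", "g6"]])
    normW = np.array([[0.9, 0.8], [0.5, 0.4], [0.1, 0.2]])
    return merge_topGene_norm(topGenes, normW, ["C1", "C2"])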
def merge_topGene_norm_acc(
topGenes,
normW,
clusternames,
acctest,
nbr_features=30,
saveres=False,
file_tag=None,
outputPath="../results/",
):
"""
# =============================================================================================== \n
# Based on the function merge_topGebe_norm, replace the column name for \n
# normW by the accuracy \n
#----- INPUT \n
# topGenes (ndarray or DataFrame) : Top Genes chosen by select_features_w \n
# normW (ndarray or DataFrame) : The normWeight of each genes given by select_features_w \n
# clusternames (list or array) : A list of the names of each class \n
# acctest (list or array) : The list of the test accuracy \n
# saveres (optional, boolean) : True if we want to save the result to local \n
# file_tag (optional, string) : A file tag which will be the prefix of the file name \n
# outputPath (optional, string) : The output Path of the file \n
# ----- OUTPUT \n
# df_res : A DataFrame with each colum the first subcolumn the genes \n
# and second subcolumn their norm of weight \n
# =============================================================================================== \n
"""
if type(topGenes) is pd.DataFrame:
topGenes = topGenes.values
if type(normW) is pd.DataFrame:
normW = normW.values
if topGenes.shape != normW.shape:
raise ValueError("The dimension of the two input should be the same")
m, n = topGenes.shape
nbC = len(clusternames)
res = np.dstack((topGenes, normW))
res = res.reshape(m, 2 * n)
lst_col = []
acctest_mean = acctest.values.tolist()[4]
for i in range(nbC):
lst_col.append((clusternames[i], "topGenes"))
astr = str(acctest_mean[i])
lst_col.append((astr, "Weights"))
df_res = pd.DataFrame(res[0:nbr_features, :], columns=lst_col)
df_res.columns = pd.MultiIndex.from_tuples(
df_res.columns, names=["CluserNames", "Attributes"]
)
if saveres:
df_res.to_csv(
"{}{}_Heatmap of Acc_normW_Topgenes.csv".format(outputPath, file_tag),
sep=";",
)
return df_res
def compare_2topGenes(
topGenes1,
topGenes2,
normW1=None,
normW2=None,
lst_col=None,
nbr_limit=30,
printOut=False,
):
"""
#=======================================================================================
    # Compares, column by column, the elements of two topGenes matrices; for
    # each column only the first nbr_limit elements are checked.
    # The two topGenes must have the same number of columns.
# ----- INPUT
# topGenes1, topGenes2 (DataFrame) : Two topGenes to be compared
# normW1, normW2 (DataFrame,optional): Two matrix of weights correspondent. Default: None
# lst_col (list, optional) : If given, only the chosen column will be compared. Default: None
# nbr_limit (scalar, optional) : Number of the lines to be compared. Default: 30
# printOut (boolean, optional) : If True, the comparison result will be shown on screen. Default: False
# ----- OUTPUT
# out (string) : It returns a string of the comparing result as output.
#=======================================================================================
"""
import pandas as pd
import numpy as np
if type(topGenes1) != type(topGenes2):
raise ValueError("The two topGenes to be compared should be of the same type.")
if type(topGenes1) is not pd.DataFrame:
col = ["C" + str(i) for i in topGenes1.shape[1]]
topGenes1 = pd.DataFrame(topGenes1, columns=col)
topGenes2 = pd.DataFrame(topGenes2, columns=col)
out = []
out.append("Comparing the two TopGenes:\n")
    # Benchmarks showed that appending to a list and joining it into a single string at the end is the least time-consuming approach.
list_name = list(topGenes1.columns)
if lst_col is not None:
list_name = [list_name[ind] for ind in lst_col]
for name in list_name:
out.append(
"{0:{fill}{align}40}\n".format(" Class %s " % name, fill="=", align="^")
)
col_1 = np.array(topGenes1[[name]], dtype=str)
col_2 = np.array(topGenes2[[name]], dtype=str)
        # Here np.nonzero returns a tuple of two arrays (row and column indices);
        # since each column vector has a single column, the column indices are
        # always 0. The last row index + 1 is therefore the number of nonzero
        # entries, i.e. the position of the first zero element.
length_nonzero_1 = np.nonzero(col_1)[0][-1] + 1
length_nonzero_2 = np.nonzero(col_2)[0][-1] + 1
# np.nonzero will not detect '0.0' as zero type
if all(col_1 == "0.0"):
length_nonzero_1 = 0
if all(col_2 == "0.0"):
length_nonzero_2 = 0
length_min = min(length_nonzero_1, length_nonzero_2)
# Check if at least one of the classes contains only zero and avoid the error
if length_min == 0 and length_nonzero_1 == length_nonzero_2:
out.append(
"* Warning: No feature is selected for both two class\n Skipped for this class"
)
continue
elif length_min == 0 and length_nonzero_1 > 0:
out.append(
"* Warning: No feature is selected for this class in TopGenes2\n"
)
out.append(
"* All {} elements are included only in topGenes1:\n".format(
min(length_nonzero_1, nbr_limit)
)
)
for k in range(min(length_nonzero_1, nbr_limit)):
if normW1 is None:
out.append(" (%s)\n" % (str(col_1[k, 0])))
else:
out.append(
" (%s, %s)\n" % (str(col_1[k, 0]), normW1[[name]].iloc[k, 0])
)
continue
elif length_min == 0 and length_nonzero_2 > 0:
out.append(
"* Warning: No feature is selected for this class in TopGenes1\n"
)
out.append(
"* All {} elements are included only in topGenes2:\n".format(
min(length_nonzero_2, nbr_limit)
)
)
for k in range(min(length_nonzero_2, nbr_limit)):
if normW2 is None:
out.append(" (%s)\n" % (str(col_2[k, 0])))
else:
out.append(
" (%s, %s)\n" % (str(col_2[k, 0]), normW2[[name]].iloc[k, 0])
)
continue
if length_min < nbr_limit:
length = length_min
out.append(
"* Warning: In this column, the 1st topGenes has {} nozero elements\n* while the 2nd one has {} nonzero elements\n".format(
length_nonzero_1, length_nonzero_2
)
)
out.append("* So only first %d elements are compared\n\n" % length_min)
else:
length = nbr_limit
set_1 = col_1[0:length]
set_2 = col_2[0:length]
set_common = np.intersect1d(set_1, set_2) # Have in common
set_o1 = np.setdiff1d(set_1, set_2) # Exclusively in topGenes1
set_o2 = np.setdiff1d(set_2, set_1) # Exclusively in topGenes2
lc = len(set_common)
# print exclusively in topGenes1
out.append(
"Included exclusively in first topGenes: {} elements in total.\n".format(
length - lc
)
)
if length - lc > 0:
if normW1 is None:
out.append("Details:(Name)\n")
else:
out.append("Details:(Name,Weight)\n")
idx_i, idx_j = np.where(topGenes1[[name]].isin(set_o1))
for i, j in zip(idx_i, idx_j):
if normW1 is None:
out.append(" (%s)\n" % str(set_1[i, j]))
else:
out.append(
" (%s, %s)\n"
% (str(set_1[i, j]), str(normW1[[name]].iloc[i, j]))
)
out.append("\nNumber of elements in common:{}\n".format(lc))
# print exclusively in topGenes1
out.append(
"\nIncluded exclusively in second topGenes: {} elements in total.\n".format(
length - lc
)
)
if length - lc > 0:
if normW2 is None:
out.append("Details:(Name)\n")
else:
out.append("Details:(Name,Weight)\n")
idx_i, idx_j = np.where(topGenes2[[name]].isin(set_o2))
for i, j in zip(idx_i, idx_j):
if normW2 is None:
out.append(" (%s)\n" % str(set_2[i, j]))
else:
out.append(
" (%s, %s)\n"
% (str(set_2[i, j]), str(normW2[[name]].iloc[i, j]))
)
out.append("{:-<40}\n".format(""))
out = "".join(out)
if printOut == True:
print(out)
return out
def heatmap_classification(
Ytest,
YR,
clusternames,
rotate=45,
draw_fig=False,
save_fig=False,
func_tag=None,
outputPath="../results/",
):
"""
#=====================================================
    # It takes the predicted labels (Ytest), true labels (YR)
    # and a list of cluster names (clusternames) as input and
    # provides the confusion-rate matrix as output.
    # A small usage sketch follows this function.
#=====================================================
"""
k = len(np.unique(YR)) # If we need to automatically find a k
Heatmap_matrix = np.zeros((k, k))
for i in np.arange(k) + 1:
for j in np.arange(k) + 1:
a = np.where(
Ytest[YR == i] == j, 1, 0
).sum() # number Ytest ==j where YR==i
b = np.where(YR == i, 1, 0).sum()
Heatmap_matrix[i - 1, j - 1] = a / b
# Plotting
if draw_fig == True:
plt.figure(figsize=(10, 6))
annot = False
if k > 10:
annot = False
if clusternames is not None:
axes = sns.heatmap(
Heatmap_matrix,
cmap="jet",
annot=annot,
fmt=".2f",
xticklabels=clusternames,
yticklabels=clusternames,
)
else:
axes = sns.heatmap(Heatmap_matrix, cmap="jet", annot=annot, fmt=".2f")
axes.set_xlabel("Predicted true positive", fontsize=14)
axes.set_ylabel("Ground true", fontsize=14)
axes.tick_params(labelsize=7)
plt.xticks(rotation=rotate)
axes.set_title("Heatmap of confusion Matrix", fontsize=14)
plt.tight_layout()
if save_fig == True:
plt.savefig(
"{}{}_Heatmap_of_confusion_Matrix.png".format(outputPath, func_tag)
)
return Heatmap_matrix
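# --- Editor's illustrative sketch (not part of the original pipeline) ---
# With draw_fig=False, heatmap_classification only returns the k x k matrix of
# per-class rates (row i, column j = fraction of true class i predicted as j).
# The toy labels and cluster names are illustrative only.
def _demo_heatmap_classification():
    import numpy as np

    YR = np.array([1, 1, 1, 2, 2, 2])      # ground truth
    Ytest = np.array([1, 1, 2, 2, 2, 1])   # predictions
    return heatmap_classification(Ytest, YR, ["A", "B"], draw_fig=False)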
def heatmap_normW(
normW,
clusternames=None,
nbr_l=10,
rotate=45,
draw_fig=False,
save_fig=False,
func_tag=None,
outputPath="../results/",
):
"""
#=====================================================
    # It takes the weight matrix normW (and optionally the cluster names) as
    # input and plots/returns a heatmap of the first nbr_l rows of |normW|,
    # normalised by its first row.
#=====================================================
"""
A = np.abs(normW)
AN = A / A[0, :]
if normW.shape[0] < nbr_l:
nbr_l = normW.shape[0]
ANR = AN[0:nbr_l, :]
annot = False
if draw_fig == True:
plt.figure(figsize=(10, 6))
# axes2=sns.heatmap(ANR,cmap='jet',annot=annot,fmt='.3f')
if clusternames is None:
axes2 = sns.heatmap(
ANR,
cmap="jet",
annot=annot,
fmt=".3f",
yticklabels=np.linspace(1, nbr_l, num=nbr_l, endpoint=True, dtype=int),
)
else:
axes2 = sns.heatmap(
ANR,
cmap="jet",
annot=annot,
fmt=".3f",
xticklabels=clusternames,
yticklabels=np.linspace(1, nbr_l, num=nbr_l, endpoint=True, dtype=int),
)
plt.xticks(rotation=rotate)
axes2.set_ylabel("Features", fontsize=14)
axes2.set_xlabel("Clusters", fontsize=14)
axes2.tick_params(labelsize=7)
axes2.set_title("Heatmap of Matrix W", fontsize=14)
plt.tight_layout()
if save_fig == True:
plt.savefig("{}{}_Heatmap_of_signature.png".format(outputPath, func_tag))
return ANR
def drop_cells_with_ID(X, Y, ID, n_fold):
"""
# ====================================================================
# This function will detect whether the size of the first dimension of
# X is divisible by n_fold. If not, it will remove the n_diff rows from
# the biggest class(with the largest size in Y) where n_diff=len(Y)%n_fold
#
# ---- Input
# X : The data
# Y : The label
# n_fold : The number of fold
# --- Output
# X_new, Y_new : The new data and the new label
# =====================================================================
"""
m, d = X.shape
if m % n_fold == 0:
return X, Y, ID
n_diff = m % n_fold
# choose in the biggest class to delete
# Find the biggest class
lst_count = []
for i in np.unique(Y):
lst_count.append(np.where(Y == i, 1, 0).sum())
ind_max = np.unique(Y)[np.argmax(lst_count)]
lst_inds = np.where(Y == ind_max)[0]
# Delete n_diff elements in the biggest class
lst_del = np.random.choice(lst_inds, n_diff)
X_new = np.delete(X, lst_del, 0)
Y_new = np.delete(Y, lst_del, 0)
ID_new = np.delete(ID, lst_del, 0)
return X_new, Y_new, ID_new
def drop_cells(X, Y, n_fold):
"""
# ====================================================================
# This function will detect whether the size of the first dimension of
# X is divisible by n_fold. If not, it removes n_diff rows from the
# biggest class (the one with the largest count in Y), where
# n_diff = len(Y) % n_fold
#
# ---- Input
# X : The data
# Y : The labels
# n_fold : The number of folds
# --- Output
# X_new, Y_new : The reduced data and labels
# =====================================================================
"""
m, d = X.shape
if m % n_fold == 0:
return X, Y
n_diff = m % n_fold
# choose in the biggest class to delete
# Find the biggest class
lst_count = []
for i in np.unique(Y):
lst_count.append(np.where(Y == i, 1, 0).sum())
ind_max = np.unique(Y)[np.argmax(lst_count)]
lst_inds = np.where(Y == ind_max)[0]
# Delete n_diff elements in the biggest class
# sample without replacement so that exactly n_diff distinct rows are removed
lst_del = np.random.choice(lst_inds, n_diff, replace=False)
X_new = np.delete(X, lst_del, 0)
Y_new = np.delete(Y, lst_del, 0)
return X_new, Y_new
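# Minimal sketch of how drop_cells behaves on synthetic data. The shapes and
# class counts below are illustrative assumptions, not values taken from the
# original experiments.
def _demo_drop_cells(n_fold=4):
    X = np.arange(22 * 3).reshape(22, 3).astype(float)  # 22 rows, not divisible by 4
    Y = np.array([1] * 12 + [2] * 10)  # class 1 is the biggest class
    X_new, Y_new = drop_cells(X, Y, n_fold)
    # 22 % 4 == 2, so two rows are removed from class 1 and 20 rows remain
    return X_new.shape, np.unique(Y_new, return_counts=True)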
# ===================== Algorithms =======================================
def FISTA_Primal(X, YR, k, param):
"""
# ====================================================================
# ---- Input
# X : The data
# YR : The labels. Note that this should be a 2D array.
# k : The number of classes
# niter : The number of iterations
# gamma : The hyper parameter gamma
# eta : The eta to calculate the projection on l1 ball
# * isEpsilon is not used in the original file in Matlab
# --- Output
# w : The projection matrix
# mu : The centers
# nbGenes_fin : The number of genes of the final step
# loss : The loss for each iteration
# ====================================================================
"""
# === Check the validity of param and initialize the parameters ===
if type(param) is not dict:
raise TypeError("Wrong type of input argument param", type(param))
lst_params = ["niter", "eta", "gamma"] # necessary params
if any(x not in param.keys() for x in lst_params):
raise ValueError(
"Missing parameter in param.\n Need {}.\n Got {} ".format(
lst_params, list(param.keys())
)
)
niter = param["niter"]
eta = param["eta"]
gamma = param["gamma"]
n, d = X.shape
# === With class2indicator():
# Y = class2indicator(YR,k)
# === With Onehotencoder:
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
loss = np.zeros(niter)
XtX = np.matmul(X.T, X)
XtY = np.matmul(X.T, Y)
w_old = np.ones((d, k))
w_loc = w_old
t_old = 1
for i in range(niter):
grad_w = np.matmul(XtX, w_loc) - XtY
# gradient step
V = w_loc - gamma * grad_w
V = np.reshape(V, d * k)
# Projection on the l1 ball
V = proj_l1ball(V, eta)
# Reshape back
w_new = np.reshape(V, (d, k))
# Chambolle method
t_new = (i + 6) / 4 # or i+6, since Python starts from 0?
w_loc_new = w_new + ((t_old - 1) / t_new) * (w_new - w_old)
w_old = w_new
w_loc = w_loc_new
t_old = t_new
loss[i] = np.linalg.norm(Y - np.matmul(X, w_loc), "fro") ** 2
# end iterations
w = w_loc
mu = centroids(np.matmul(X, w), YR, k)
nbGenes_fin = nb_Genes(w)[0]
loss = loss / loss[0]
return w, mu, nbGenes_fin, loss
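# Illustrative call to FISTA_Primal on random data. The parameter values
# (niter, gamma, eta) are placeholders for the example, not tuned settings.
def _demo_fista_primal():
    rng = np.random.RandomState(0)
    X = rng.rand(40, 12)
    YR = rng.randint(1, 3, size=(40, 1))  # two classes, labels given as a 2D array
    param = {"niter": 20, "gamma": 1e-3, "eta": 5}
    w, mu, nb_genes, loss = FISTA_Primal(X, YR, k=2, param=param)
    return w.shape, nb_genes, loss[-1]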
def primal_dual_L1N(X, YR, k, param):
"""
# ====================================================================
# ---- Input
# X : The data
# YR : The labels. Note that this should be a 2D array.
# k : The number of classes
# param : A dict parameter which must have the keys:
# 'niter', 'eta', 'tau', 'rho', 'sigma', 'beta', 'tau2' and 'delta'
# Normally speaking:
# (The default value for beta is 0.25.)
# (IF not given, the value of the 'tau2' will be calculated by
# tau2 = 0.25*(1/(np.sqrt(m)*normY)). Note that this normY is
# the 2-norm of the OneHotEncode of the YR given.)
# (Default value of the 'delta' is 1.0)
# --- Output
# w : The projection matrix of size (d,k)
# mu : The centers of classes
# nbGenes_fin : The number of genes of the final result
# loss : The loss for each iteration
# Z : The dual matrix of size (m,k)
# =====================================================================
"""
m, d = X.shape
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
# normY = np.linalg.norm(Y,2)
# === Check the validity of param and initialize the parameters ===
if type(param) is not dict:
raise TypeError("Wrong type of input argument param", type(param))
lst_params = [
"niter",
"eta",
"tau",
"rho",
"sigma",
"delta",
"tau2",
"beta",
] # necessary params
if any(x not in param.keys() for x in lst_params):
raise ValueError(
"Missing parameter in param.\n Need {}.\n Got {} ".format(
lst_params, list(param.keys())
)
)
niter = param["niter"]
eta = param["eta"]
tau = param["tau"]
rho = param["rho"]
sigma = param["sigma"]
delta = param["delta"]
tau2 = param["tau2"]
# beta = param['beta']
# === END check block ===
# Initialization
w_old = np.ones((d, k))
Z_old = np.ones((m, k))
mu_old = np.eye(k, k)
Ik = np.eye(k, k)
loss = np.zeros(niter)
# Main Block
for i in range(niter):
V = w_old + tau * np.matmul(X.T, Z_old)
# Reshape
V = np.reshape(V, d * k)
V = proj_l1ball(V, eta)
V[np.where(np.abs(V) < 0.001)] = 0
# Reshape back
w_new = np.reshape(V, (d, k))
# no gamma here
# w_new = w_new + gamma*(w_new - w_old) =>
w = 2 * w_new - w_old
mu_new = (mu_old + rho * tau2 * Ik - tau2 * np.matmul(Y.T, Z_old)) / (
1 + tau2 * rho
)
# mu = mu_new + gamma*(mu_new - mu_old) =>
mu = 2 * mu_new - mu_old
Z = (Z_old + sigma * (np.matmul(Y, mu) - np.matmul(X, w))) / (1 + sigma * delta)
Z_new = np.maximum(np.minimum(Z, 1), -1)
mu_old = mu_new
w_old = w_new
Z_old = Z_new
loss[i] = np.linalg.norm(
np.matmul(Y, mu_new) - np.matmul(X, w_new), 1
) + 0.5 * (np.linalg.norm(Ik - mu_new, "fro") ** 2)
# End loop
Z = Z_old
w = w_new
mu = mu_new
nbGenes_fin = nb_Genes(w)[0]
loss = loss / loss[0]
return w, mu, nbGenes_fin, loss, Z
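# Illustrative call to primal_dual_L1N. The step sizes follow the same recipe
# used later in basic_run_eta (tau2 and sigma derived from beta, tau and the
# 2-norm of the one-hot labels), but the concrete numbers are only an example.
def _demo_primal_dual_L1N():
    rng = np.random.RandomState(0)
    X = rng.rand(40, 12)
    YR = rng.randint(1, 4, size=(40, 1))  # three classes
    Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
    normY = np.linalg.norm(Y, 2)
    beta, tau, rho = 0.25, 4, 1
    tau2 = beta * (1 / (np.sqrt(X.shape[0]) * normY))
    eps = 1 / (1 + tau2 * rho * 0.25)
    sigma = 1.0 / (tau + (tau2 * eps * normY ** 2))
    param = {"niter": 20, "eta": 5, "tau": tau, "rho": rho,
             "sigma": sigma, "delta": 1.0, "tau2": tau2, "beta": beta}
    w, mu, nb_genes, loss, Z = primal_dual_L1N(X, YR, k=3, param=param)
    return w.shape, Z.shape, loss[-1]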
def primal_dual_Nuclear(X, YR, k, param):
"""
# ====================================================================
# ---- Input
# X : The data
# YR : The labels. Note that this should be a 2D array.
# k : The number of classes
# param : A dict parameter which must have the keys:
# 'niter', 'eta_star', 'tau', 'rho','sigma', 'tau2','delta'
# and 'gamma'
# Normally speaking:
# (The default value for beta is 0.25.)
# (IF not given, the value of the 'tau2' will be calculated by
# tau2 = 0.25*(1/(np.sqrt(m)*normY)). Note that this normY is
# the 2-norm of the OneHotEncode of the YR given.)
# (Default value of the 'delta' is 1.0)
# --- Output
# w : The projection matrix of size (d,k)
# mu : The centers of classes
# nbGenes_fin : The number of genes of the final result
# loss : The loss for each iteration
# Z : The dual matrix of size (m,k)
# =====================================================================
"""
m, d = X.shape
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
# === Check the validity of param and initialize the parameters ===
if type(param) is not dict:
raise TypeError("Wrong type of input argument param", type(param))
lst_params = [
"niter",
"eta_star",
"tau",
"rho",
"sigma",
"tau2",
"delta",
"beta",
] # necessary params ('delta' is read below, so it must be provided)
if any(x not in param.keys() for x in lst_params):
raise ValueError(
"Missing parameter in param.\n Need {}.\n Got {} ".format(
lst_params, list(param.keys())
)
)
niter = param["niter"]
eta_star = param["eta_star"]
delta = param["delta"]
tau = param["tau"]
rho = param["rho"]
sigma = param["sigma"]
tau2 = param["tau2"]
# === END check block ===
# Initialization
w_old = np.ones((d, k))
Z_old = np.ones((m, k))
mu_old = np.eye(k, k)
Ik = np.eye(k, k)
loss = np.zeros(niter)
# Main Block
for i in range(niter):
V = w_old + tau * np.matmul(X.T, Z_old)
# Nuclear constraint
L, S0, R = np.linalg.svd(V, full_matrices=False)
norm_nuclear = S0.sum()
vs1 = proj_l1ball(S0.reshape((-1,)), eta_star)
S1 = vs1.reshape(S0.shape)
w = np.matmul(L, S1[..., None] * R)
w = 2 * w - w_old
mu_new = (mu_old + rho * tau2 * Ik - tau2 * np.matmul(Y.T, Z_old)) / (
1 + tau2 * rho
)
mu = 2 * mu_new - mu_old
Z = (Z_old + sigma * (np.matmul(Y, mu) - np.matmul(X, w))) / (1 + sigma * delta)
Z_new = np.maximum(np.minimum(Z, 1), -1)
mu_old = mu_new
w_old = w
Z_old = Z_new
loss[i] = np.linalg.norm(np.matmul(Y, mu_new) - np.matmul(X, w), 1) + 0.5 * (
np.linalg.norm(Ik - mu_new, "fro") ** 2
)
# End loop
Z = Z_old
mu = mu_new
nbGenes_fin, _ = nb_Genes(w)
loss = loss / loss[0]
return w, mu, nbGenes_fin, loss, Z
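# Same idea for the nuclear-norm variant: eta_star bounds the nuclear norm of w
# instead of eta bounding its l1 norm. The values below are illustrative only.
def _demo_primal_dual_nuclear():
    rng = np.random.RandomState(0)
    X = rng.rand(40, 12)
    YR = rng.randint(1, 3, size=(40, 1))
    param = {"niter": 20, "eta_star": 2.0, "tau": 4, "rho": 1,
             "sigma": 0.5, "tau2": 0.1, "delta": 1.0, "beta": 0.25}
    w, mu, nb_genes, loss, Z = primal_dual_Nuclear(X, YR, k=2, param=param)
    return w.shape, nb_genes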
# ================================== Part 2 ====================================
# ===================== Base Launch functions (scripts) ========================
def basic_run_eta(
func_algo,
func_predict,
X,
YR,
k,
genenames=None,
clusternames=None,
niter=30,
rho=1,
tau=4,
beta=0.25,
delta=1.0,
eta=None,
eta_star=None,
gamma=1,
nfold=4,
rng=1,
showres=True,
keepfig=False,
saveres=False,
outputPath="../results/",
):
"""
# =====================================================================
# Basic function to launch the algorithm of some specific parameters.
# - Input:
# - func_algo (necessary) : The function of the algorithm
# - func_predict (necessary) : The function to predict
# - X (necessary) : The data
# - YR (necessary) : The labels for the data
# - k (necessary) : The number of the clusters
#
# - genenames (optional) : The names of the features of the data
# if not given, it will be
# ['Gene 1','Gene 2',...]
#
# - clusternames (optional) : The clusternames of the data
# if not given, it will be
# ['Class 1', 'Class 2',...]
#
# - niter (optional) : The number of iterations
#
# - rho, tau, beta, delta, : The hyper-parameters for the algo
# eta, gamma, etc (optional)
#
# - nfold (optional) : The number of the folds of the cross validation
#
# - rng (optional) : The seed for the random number generator
#
# - showres (optional) : Boolean value. True if we want to show
# the results, plot the figures etc.
#
# - saveres (optional) : Boolean value. True to save the results
#
# - outputPath (optional) : String value. The output path.
#
# - Output:
# - mu : The centroids
# - nbm : Number of genes
# - accG : Global accuracy
# - loss : Loss for each iteration
# - W_mean : Mean weight matrix for all folds
# - timeElapsed : Time elapsed for one fold
# - (And the tables) : df_topGenes, df_normW, df_topG_normW,
# df_topGenes_mean, df_normW_mean,
# df_topG_normW_mean, df_acctest
# ======================================================================
"""
np.random.seed(rng) # reproducible
if not os.path.exists(outputPath): # make the directory if it does not exist
os.makedirs(outputPath)
n, d = X.shape
# parameter checking
if genenames is None:
genenames = ["Gene {}".format(i + 1) for i in range(d)]
if clusternames is None:
clusternames = ["Class {}".format(i + 1) for i in range(k)]
# Normalize the mean of the data (deprecated)
# m = np.mean(X,axis=0)
# X = X-m
# normX = normest(X)
# X = X/normX
# YR = np.array(YR).reshape(-1,1)
if YR.ndim == 1: # OneHotEncoder expects a 2D array, so reshape 1D labels
YR = YR.reshape(-1, 1)
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
normY = normest(Y)
normY2 = normY ** 2
# Randomly drop rows if n % nfold is not zero
# For more details please see instructions in drop_cells
X, YR = drop_cells(X, YR, nfold)
param = {}
param["niter"] = niter
param["rho"] = rho
param["tau"] = tau
tau2 = beta * (1 / (np.sqrt(n) * normY))
param["tau2"] = tau2
eps = 1 / (1 + tau2 * rho * 0.25)
sigma = 1.0 / (tau + (tau2 * eps * normY2)) # Converge until 2.6 for L1Nel
param["sigma"] = sigma
param["delta"] = delta
param["beta"] = beta
param["eta"] = eta
param["eta_star"] = eta_star
param["gamma"] = gamma
# Initialization
nbG = np.zeros(nfold, dtype=int) # Number of genes for each fold
accuracy_train = np.zeros((nfold, k + 1))
accuracy_test = np.zeros((nfold, k + 1))
W0 = np.zeros((d, k, nfold)) # w in each fold
mu0 = np.zeros((k, k, nfold))
W_mean = np.zeros((d, k))
# Z0 = np.zeros((int((nfold-1)*n/nfold),k,nfold))
# Z_mean = np.zeros((int((nfold-1)*n/nfold),k))
loss_iter0 = np.zeros((nfold, niter)) # loss for each iteration of each fold
# W_mean stores w for each eta, where w is the mean of W0 along its third axis
# Parameters printing
print("\nStarts trainning for")
print("{:>6}:{:<6}".format("niter", niter))
if "fista" in func_algo.__name__.lower():
print("{:>6}:{:<6}".format("eta", eta))
print("{:>6}:{:<6}".format("gamma", delta))
elif "or" in func_algo.__name__.lower():
print("{:>6}:{:<6}".format("eta", eta))
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
print("{:>6}:{:<6}".format("delta", delta))
print("{:>6}:{:<6}".format("gamma", delta))
elif "_l2" in func_algo.__name__.lower():
print("{:>6}:{:<6}".format("eta", eta))
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
elif "nuclear" in func_algo.__name__.lower():
print("{:>6}:{:<6}".format("eta_star", eta_star))
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
print("{:>6}:{:<6}".format("delta", delta))
else:
print("{:>6}:{:<6}".format("eta", eta))
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
print("{:>6}:{:<6}".format("delta", delta))
Y_PDS = np.zeros(YR.shape)
meanclassi = np.zeros(nfold)
kf = KFold(n_splits=nfold, random_state=rng, shuffle=True)
w_all, mu_all, nbGenes_all, loss_all = func_algo(X, YR, k, param)[0:4]
for i, (train_ind, test_ind) in enumerate(kf.split(YR)):
print("{:-<30}".format(""))
print("{message:^6} {f1} / {f2}".format(message="fold", f1=i + 1, f2=nfold))
print("-> {} classification...".format(func_algo.__name__))
# ========== Training =========
Xtrain = X[train_ind]
Xtest = X[test_ind]
Ytrain = YR[train_ind]
Ytest = YR[test_ind]
startTime = time.perf_counter()
w, mu, nbGenes, loss = func_algo(Xtrain, Ytrain, k, param)[0:4]
endTime = time.perf_counter()
timeElapsed = endTime - startTime
print("-> Completed.\n-> Time Elapsed:{:.4}s".format(timeElapsed))
W0[:, :, i] = w
mu0[:, :, i] = mu
# Z0[:,:,i] = Z
loss_iter0[i, :] = loss
# ========== Accuracy =========
Ytrain_pred = func_predict(Xtrain, w, mu)
Ytest_pred = func_predict(Xtest, w, mu)
accuracy_train[i, 0], accuracy_train[i, 1 : k + 1] = compute_accuracy(
Ytrain, Ytrain_pred, k
)
accuracy_test[i, 0], accuracy_test[i, 1 : k + 1] = compute_accuracy(
Ytest, Ytest_pred, k
)
meanclassi[i] = np.mean(accuracy_test[i, 1 : k + 1])
nbG[i] = nbGenes
Y_PDS[test_ind] = Ytest_pred
print("{:-<30}".format(""))
# end kfold loop
nbm = int(nbG.mean())
accG = np.mean(accuracy_test[:, 0], axis=0)
Meanclass = meanclassi.mean()
W_mean = np.mean(W0, axis=2)
mu_mean = np.mean(mu0, axis=2)
# Z_mean= np.mean(Z0,axis=2)
normfro = np.linalg.norm(w, "fro")
print("Training step ends.\n")
# Class size
Ctab = []
size_class = np.zeros(k) # Size of each class (real)
size_class_est = np.zeros(k) # Size of each class (estimated)
for j in range(k):
size_class[j] = (YR == (j + 1)).sum()
size_class_est[j] = (Y_PDS == (j + 1)).sum()
Ctab.append("Class {}".format(j + 1))
df_szclass = pd.DataFrame(size_class, index=Ctab, columns=["Class Size"])
df_szclass_est = pd.DataFrame(size_class_est, index=Ctab, columns=["Class Size"])
# Data accuracy
accuracy_train = np.vstack((accuracy_train, np.mean(accuracy_train, axis=0)))
accuracy_test = np.vstack((accuracy_test, np.mean(accuracy_test, axis=0)))
ind_df = []
for i_fold in range(nfold):
ind_df.append("Fold {}".format(i_fold + 1))
ind_df.append("Mean")
columns = ["Global"]
if clusternames is None:
columns += Ctab
else:
columns += clusternames
df_accTrain = pd.DataFrame(accuracy_train, index=ind_df, columns=columns)
df_acctest = pd.DataFrame(accuracy_test, index=ind_df, columns=columns)
# Feature selection
print("Selecting features from whole dataset...", end="")
w, mu, nbGenes, loss = func_algo(X, YR, k, param)[0:4]
topGenes, normW = select_feature_w(w, genenames)
topGenes_mean, normW_mean = select_feature_w(W_mean, genenames)
# Mean of each fold
df_topGenes_mean = pd.DataFrame(topGenes_mean, columns=clusternames)
df_normW_mean = pd.DataFrame(normW_mean, columns=clusternames)
df_topG_normW_mean = merge_topGene_norm(topGenes_mean, normW_mean, clusternames)
# All data
df_topGenes = pd.DataFrame(topGenes, columns=clusternames)
df_normW = pd.DataFrame(normW, columns=clusternames)
df_topG_normW = merge_topGene_norm(topGenes, normW, clusternames)
print("Completed.\n")
# Two heatmaps
M_heatmap_classification = heatmap_classification(
Y_PDS, YR, clusternames, rotate=60
)
M_heatmap_signature = heatmap_normW(normW, clusternames, nbr_l=30, rotate=60)
# Results
if showres == True:
print("Size class (real):")
print(df_szclass)
print("\nSize class (estimated):")
print(df_szclass_est)
print("\nAccuracy Train")
print(df_accTrain)
print("\nAccuracy Test")
print(df_acctest)
if keepfig == False:
plt.close("all")
fig_lossIter = plt.figure(figsize=(8, 6))
plt.plot(np.arange(niter, dtype=int) + 1, loss)
msg_eta = "$\eta$:%d" % eta if eta is not None else ""
msg_etaS = "$\eta*$:%d" % eta_star if eta_star is not None else ""
plt.title(
"loss for each iteration {} {}\n ({})".format(
msg_eta, msg_etaS, func_algo.__name__
),
fontsize=18,
)
plt.ylabel("Loss", fontsize=18)
plt.xlabel("Iteration", fontsize=18)
plt.xticks(np.linspace(1, niter, num=6, endpoint=True, dtype=int))
plt.xlim(left=1, right=niter)
plt.ylim((0, 1))
# Saving Result
if saveres == True:
# define two nametags
nametag_eta = "_eta-%d" % eta if eta is not None else ""
nametag_etaS = "_etaStar-%d" % eta_star if eta_star is not None else ""
# save loss
filename_loss = "loss_{}_beta-{}_delta-{}{}{}_niter-{}.txt".format(
func_algo.__name__, beta, delta, nametag_eta, nametag_etaS, niter
)
np.savetxt(outputPath + filename_loss, loss)
# define function name tag for two heatmaps
func_tag = func_algo.__name__ + nametag_eta + nametag_etaS
# Save heatmaps
filename_heat = "{}{}_Heatmap_of_confusion_Matrix.npy".format(
outputPath, func_tag
)
np.save(filename_heat, M_heatmap_classification)
filename_heat = "{}{}_Heatmap_of_signature_Matrix.npy".format(
outputPath, func_tag
)
np.save(filename_heat, M_heatmap_signature)
df_acctest.to_csv(
"{}{}{}{}_AccuracyTest.csv".format(
outputPath, func_algo.__name__, nametag_eta, nametag_etaS
),
sep=";",
)
df_topG_normW.to_csv(
"{}{}{}{}_TopGenesAndNormW.csv".format(
outputPath, func_algo.__name__, nametag_eta, nametag_etaS
),
sep=";",
)
# Other possibilities to save
# fig_lossIter.savefig('{}{}{}{}_niter-{}_loss_iters.png'.format(outputPath,func_algo.__name__,nametag_eta,nametag_etaS,niter))
# All data
# df_topGenes.to_csv('{}{}_TopGenes.csv'.format(outputPath,func_algo.__name__),sep=';')
# df_normW.to_csv('{}{}_NormW.csv'.format(outputPath,func_algo.__name__),sep=';')
# Mean of each fold
# df_topGenes_mean.to_csv('{}{}_TopGenes_mean.csv'.format(outputPath,func_algo.__name__),sep=';')
# df_normW_mean.to_csv('{}{}_NormW_mean.csv'.format(outputPath,func_algo.__name__),sep=';')
# df_topG_normW_mean.to_csv('{}{}_TopGenesAndNormW_mean.csv'.format(outputPath,func_algo.__name__),sep=';')
return (
mu_mean,
nbm,
accG,
loss,
W_mean,
timeElapsed,
df_topGenes,
df_normW,
df_topG_normW,
df_topGenes_mean,
df_normW_mean,
df_topG_normW_mean,
df_acctest,
w_all,
)
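# Hedged end-to-end sketch of how basic_run_eta is typically driven: pick an
# algorithm and its matching prediction function, then unpack the returned
# tables. predict_L1 is assumed to be the predictor defined earlier in this
# module for the L1 primal-dual algorithm; the data below are random.
def _demo_basic_run_eta():
    rng = np.random.RandomState(0)
    X = rng.rand(60, 15)
    YR = rng.randint(1, 3, size=60)  # 1D labels are reshaped internally
    results = basic_run_eta(
        primal_dual_L1N, predict_L1, X, YR, k=2,
        niter=20, eta=5, nfold=4, showres=False, saveres=False,
    )
    mu_mean, nbm, accG = results[0], results[1], results[2]
    df_acctest = results[12]
    return nbm, accG, df_acctest.shape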
# ===================== ========================================================
def getPredLabel(Ypred):
for i in range(Ypred.shape[0]):
if Ypred[i] > 1.5:
Ypred[i] = 2
if Ypred[i] <= 1.5:
Ypred[i] = 1
return Ypred
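# getPredLabel rounds continuous two-class scores to labels in {1, 2} with a
# 1.5 threshold. An equivalent, non-mutating vectorised form (shown only for
# comparison) is sketched below.
def _getPredLabel_vectorised(Ypred):
    return np.where(np.asarray(Ypred) > 1.5, 2, 1)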
# =====================Functions used to compare different algorithms========================================================
def getCoefs(alg, model):
if alg == "RF":
coef = model.feature_importances_
if alg == "svm":
coef = model.coef_.transpose()
if alg == "plsda":
coef = model.coef_
return coef
# =====================Functions used to compute the ranked features and their weights=======================
def TopGenbinary(w, feature_names):
n = len(w)
difference = np.zeros(n)
for i in range(n):
difference[i] = w[i][0] - w[i][1]
df1 = pd.DataFrame(feature_names, columns=["pd"])
df1["weights"] = difference
# =====Sort the difference based on the absolute value=========
df1["sort_helper"] = df1["weights"].abs()
df2 = df1.sort_values(by="sort_helper", ascending=False).drop("sort_helper", axis=1)
# ==== end_sort=============
return df2
def rankFeatureHelper(alg, coef, feature_names):
df1 = pd.DataFrame(feature_names, columns=[alg])
df1["weights"] = coef
df1["sort_helper"] = df1["weights"].abs()
df2 = df1.sort_values(by="sort_helper", ascending=False).drop("sort_helper", axis=1)
return df2
def rankFeatures(X, Yr, algList, feature_names):
# flag=0
featureList = []
for alg in algList:
if alg == "svm":
clf = SVC(probability=True, kernel="linear")
model = clf.fit(X, Yr.ravel())
coef = model.coef_.transpose()
df_rankFeature = rankFeatureHelper(alg, coef, feature_names)
featureList.append(df_rankFeature)
if alg == "RF":
clf = RandomForestClassifier(n_estimators=400, random_state=10, max_depth=3)
model = clf.fit(X, Yr.ravel())
coef = model.feature_importances_
df_rankFeature = rankFeatureHelper(alg, coef, feature_names)
featureList.append(df_rankFeature)
if alg == "plsda":
clf = PLSRegression(n_components=4, scale=False)
model = clf.fit(X, Yr.ravel())
coef = model.coef_
df_rankFeature = rankFeatureHelper(alg, coef, feature_names)
featureList.append(df_rankFeature)
# if flag == 0:
# df_rankFeature = TopGenbinary(coef, feature_names)
# flag =1
# else:
# df_feature = TopGenbinary(coef, feature_names)
# df_rankFeature
return featureList
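# Sketch of how rankFeatures is meant to be called: it returns one DataFrame
# per algorithm, sorted by absolute weight. The data and feature names are
# made up for the example; only the 'RF' branch is exercised here.
def _demo_rank_features():
    rng = np.random.RandomState(0)
    X = rng.rand(50, 6)
    Yr = rng.randint(1, 3, size=50)
    feature_names = ["gene_{}".format(i) for i in range(6)]
    ranked = rankFeatures(X, Yr, ["RF"], feature_names)
    return [df.head(3) for df in ranked]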
# ===============================Compute the \rho==============================
def basic_run_eta_molecule(
X,
YR,
ID,
k,
genenames=None,
clusternames=None,
niter=30,
rho=1,
tau=4,
beta=0.25,
delta=1.0,
eta=500,
gamma=1,
nfold=4,
random_seed=1,
):
"""
# =====================================================================
# This function is used to compute the df_confidence
# Basic function to launch the algorithm of some specific parameters.
# - Input:
# The function of the algorithm: primal_dual_L1N
# The function to predict: predict_L1_molecule
# - X (necessary) : The data
# - YR (necessary) : The labels for the data
# - k (necessary) : The number of the clusters
#
# - genenames (optional) : The names of the features of the data
# if not given, it will be
# ['Gene 1','Gene 2',...]
#
# - clusternames (optional) : The clusternames of the data
# if not given, it will be
# ['Class 1', 'Class 2',...]
#
# - niter (optional) : The number of iterations
#
# - rho, tau, beta, delta, : The hyper-parameters for the algo
# eta, gamma (optional)
#
# - nfold (optional) : The number of the folds of the cross validation
#
# - random_seed (optional) : The seed for the random number generator
#
# - Output:
# - Yprediction : list of Predicted labels
# ======================================================================
"""
np.random.seed(random_seed) # reproducible
n, d = X.shape
# parameter checking
if genenames is None:
genenames = ["Gene {}".format(i + 1) for i in range(d)]
if clusternames is None:
clusternames = ["Class {}".format(i + 1) for i in range(k)]
if YR.ndim == 1: # OneHotEncoder expects a 2D array, so reshape 1D labels
YR = YR.reshape(-1, 1)
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
normY = normest(Y)
normY2 = normY ** 2
# Randomly drop rows if n % nfold is not zero
# See more details in drop_cells
X, YR, Ident = drop_cells_with_ID(X, YR, ID, nfold)
dico = dict(list(enumerate(Ident)))
ref = pd.DataFrame.from_dict(dico, orient="index")
param = {}
param["niter"] = niter
param["rho"] = rho
param["tau"] = tau
tau2 = beta * (1 / (np.sqrt(n) * normY))
param["tau2"] = tau2
eps = 1 / (1 + tau2 * rho * 0.25)
sigma = 1.0 / (tau + (tau2 * eps * normY2)) # Converge until 2.6 for L1Nel
param["sigma"] = sigma
param["delta"] = delta
param["beta"] = beta
param["eta"] = eta
param["gamma"] = gamma
# Initialization
nbG = np.zeros(nfold, dtype=int) # Number of genes for each fold
W0 = np.zeros((d, k, nfold)) # w in each fold
mu0 = np.zeros((k, k, nfold))
# Z0 = np.zeros((int((nfold-1)*n/nfold),k,nfold))
# Z_mean = np.zeros((int((nfold-1)*n/nfold),k))
loss_iter0 = np.zeros((nfold, niter)) # loss for each iteration of each fold
# W_mean stores w for each eta, where w is the mean of W0 along its third axis
# Parameters printing
print("\nStarts trainning for")
print("{:>6}:{:<6}".format("niter", niter))
print("{:>6}:{:<6}".format("eta", eta))
if "fista" in primal_dual_L1N.__name__.lower():
print("{:>6}:{:<6}".format("gamma", delta))
elif "or" in primal_dual_L1N.__name__.lower():
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
print("{:>6}:{:<6}".format("delta", delta))
print("{:>6}:{:<6}".format("gamma", delta))
elif "_l2" in primal_dual_L1N.__name__.lower():
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
else:
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
print("{:>6}:{:<6}".format("delta", delta))
Yprediction = []
Confidence = []
# accuracy_train = np.zeros((nfold,k+1))
# accuracy_test = np.zeros((nfold,k+1))
ID = [] # per-fold test indices (the input ID array is no longer needed here)
Ident = [] # per-fold identifier rows looked up from ref
kf = KFold(n_splits=nfold, random_state=random_seed, shuffle=True)
w_all, mu_all, nbGenes_all, loss_all = primal_dual_L1N(X, YR, k, param)[0:4]
for i, (train_ind, test_ind) in enumerate(kf.split(YR)):
print("{:-<30}".format(""))
print("{message:^6} {f1} / {f2}".format(message="fold", f1=i + 1, f2=nfold))
print("-> {} classification...".format(primal_dual_L1N.__name__))
# ========== Training =========
Xtrain = X[train_ind]
Ytrain = YR[train_ind]
Xtest = X[test_ind]
startTime = time.perf_counter()
w, mu, nbGenes, loss = primal_dual_L1N(Xtrain, Ytrain, k, param)[0:4]
endTime = time.perf_counter()
timeElapsed = endTime - startTime
print("-> Completed.\n-> Time Elapsed:{:.4}s".format(timeElapsed))
W0[:, :, i] = w
mu0[:, :, i] = mu
loss_iter0[i, :] = loss
# ========== Prediction =========
Ypred, conf = predict_L1_molecule(Xtest, w, mu)
Yprediction.append(Ypred)
Confidence.append(conf)
ID.append(test_ind)
Ident.append(ref.iloc[test_ind])
nbG[i] = nbGenes
print("{:-<30}".format(""))
# end kfold loop
return Yprediction, Confidence, ID, Ident, YR, ref
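# Illustrative driver for basic_run_eta_molecule. The ID array is assumed to
# hold one identifier per row of X (e.g. a sample or molecule name); the data
# are random and the hyper-parameters are placeholders.
def _demo_basic_run_eta_molecule():
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10)
    YR = rng.randint(1, 3, size=(40, 1))
    ID = np.array(["sample_{}".format(i) for i in range(40)])
    Ypred, conf, ids, idents, YR_kept, ref = basic_run_eta_molecule(
        X, YR, ID, k=2, niter=20, eta=10, nfold=4
    )
    return len(Ypred), ref.shape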
# ===================== Base Launch functions (scripts) ========================
def basic_run_eta_compare(
func_algo,
func_predict,
X,
YR,
k,
alglist,
genenames=None,
clusternames=None,
niter=30,
rho=1,
tau=4,
beta=0.25,
delta=1.0,
eta=None,
eta_star=None,
gamma=1,
nfold=4,
rng=1,
showres=False,
keepfig=False,
saveres=False,
outputPath="../results/",
):
"""
# =====================================================================
# Basic function to launch the algorithm of some specific parameters.
# - Input:
# - func_algo (necessary) : The function of the algorithm
# - func_predict (necessary) : The function to predict
# - X (necessary) : The data
# - YR (necessary) : The labels for the data
# - k (necessary) : The number of the clusters
#
# - genenames (optional) : The names of the features of the data
# if not given, it will be
# ['Gene 1','Gene 2',...]
#
# - clusternames (optional) : The clusternames of the data
# if not given, it will be
# ['Class 1', 'Class 2',...]
#
# - niter (optional) : The number of iterations
#
# - rho, tau, beta, delta, : The hyper-parameters for the algo
# eta, gamma, etc (optional)
#
# - nfold (optional) : The number of the folds of the cross validation
#
# - rng (optional) : The seed for the random number generator
#
# - showres (optional) : Boolean value. True if we want to show
# the results, plot the figures etc.
#
# - saveres (optional) : Boolean value. True to save the results
#
# - alglist (necessary) : The list of algorithms to compare, e.g. ['svm', 'RF', 'plsda']
#
# - outputPath (optional) : String value. The output path.
#
#
# - Output:
# - mu : The centroids
# - nbm : Number of genes
# - accG : Global accuracy
# - loss : Loss for each iterations
# - W_mean : Mean weight matrix for all folds
# - timeElapsed : Time elapsed for one fold
# - (And the tables) : df_topGenes, df_normW, df_topG_normW,
# df_topGenes_mean, df_normW_mean,
# df_topG_normW_mean, df_acctest
# ======================================================================
"""
np.random.seed(rng) # reproducible
if not os.path.exists(outputPath): # make the directory if it does not exist
os.makedirs(outputPath)
n, d = X.shape
# parameter checking
if genenames is None:
genenames = ["Gene {}".format(i + 1) for i in range(d)]
if clusternames is None:
clusternames = ["Class {}".format(i + 1) for i in range(k)]
# Normalize the mean of the data (deprecated)
# m = np.mean(X,axis=0)
# X = X-m
# normX = normest(X)
# X = X/normX
# YR = np.array(YR).reshape(-1,1)
if YR.ndim == 1: # OneHotEncoder expects a 2D array, so reshape 1D labels
YR = YR.reshape(-1, 1)
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
normY = normest(Y)
normY2 = normY ** 2
# Randomly drop rows if n % nfold is not zero
# For more details please see instructions in drop_cells
X, YR = drop_cells(X, YR, nfold)
param = {}
param["niter"] = niter
param["rho"] = rho
param["tau"] = tau
tau2 = beta * (1 / (np.sqrt(n) * normY))
param["tau2"] = tau2
eps = 1 / (1 + tau2 * rho * 0.25)
sigma = 1.0 / (tau + (tau2 * eps * normY2)) # Converge until 2.6 for L1Nel
param["sigma"] = sigma
param["delta"] = delta
param["beta"] = beta
param["eta"] = eta
param["eta_star"] = eta_star
param["gamma"] = gamma
# Initialization
nbG = np.zeros(nfold, dtype=int) # Number of genes for each fold
accuracy_train = np.zeros((nfold, k + 1))
accuracy_test = np.zeros((nfold, k + 1))
auc_train = np.zeros((nfold))
auc_test = np.zeros((nfold))
sil_train = np.zeros((nfold))
W0 = np.zeros((d, k, nfold)) # w in each fold
mu0 = np.zeros((k, k, nfold))
W_mean = np.zeros((d, k))
# Z0 = np.zeros((int((nfold-1)*n/nfold),k,nfold))
# Z_mean = np.zeros((int((nfold-1)*n/nfold),k))
loss_iter0 = np.zeros((nfold, niter)) # loss for each iteration of each fold
# W_mean stores w for each eta, where w is the mean of W0 along its third axis
# Parameters printing
# print('\nStarts trainning for')
# print('{:>6}:{:<6}'.format('niter',niter))
Y_PDS = np.zeros(YR.shape)
meanclassi = np.zeros(nfold)
kf = KFold(n_splits=nfold, random_state=rng, shuffle=True)
numalg = len(alglist)
accuracy_train_comp = np.zeros((nfold, numalg))
accuracy_test_comp = np.zeros((nfold, numalg))
AUC_train_comp = np.zeros((nfold, numalg * 4))
AUC_test_comp = np.zeros((nfold, numalg * 4))
timeElapsedMatrix = np.zeros((nfold, numalg + 1))
w_all, mu_all, nbGenes_all, loss_all = func_algo(X, YR, k, param)[0:4]
# nfold-fold cross-validation
for i, (train_ind, test_ind) in enumerate(kf.split(YR)):
print("{:-<30}".format(""))
print("{message:^6} {f1} / {f2}".format(message="fold", f1=i + 1, f2=nfold))
# ========== Training =========
Xtrain = X[train_ind]
Xtest = X[test_ind]
Ytrain = YR[train_ind]
Ytest = YR[test_ind]
Ytr = pd.get_dummies(Ytrain.ravel()).values.T.T
Yte = pd.get_dummies(Ytest.ravel())
startTime = time.perf_counter()
w, mu, nbGenes, loss = func_algo(Xtrain, Ytrain, k, param)[0:4]
endTime = time.perf_counter()
timeElapsed = endTime - startTime
timeElapsedMatrix[i][numalg] = timeElapsed
print("-> Time Elapsed:{:.4}s".format(timeElapsed))
W0[:, :, i] = w
mu0[:, :, i] = mu
# Z0[:,:,i] = Z
loss_iter0[i, :] = loss
# ========== Accuracy =========
Ytrain_pred = func_predict(Xtrain, w, mu)
Ytest_pred = func_predict(Xtest, w, mu)
accuracy_train[i, 0], accuracy_train[i, 1 : k + 1] = compute_accuracy(
Ytrain, Ytrain_pred, k
)
accuracy_test[i, 0], accuracy_test[i, 1 : k + 1] = compute_accuracy(
Ytest, Ytest_pred, k
)
if (
np.unique(Ytest).shape[0] == 2
and np.unique(Ytest_pred.astype("int64")).shape[0] == 2
):
auc_test[i] = roc_auc_score(Ytest_pred.astype("int64"), Ytest)
auc_train[i] = roc_auc_score(Ytrain_pred.astype("int64"), Ytrain)
meanclassi[i] = np.mean(accuracy_test[i, 1 : k + 1])
nbG[i] = nbGenes
Y_PDS[test_ind] = Ytest_pred
# start loop of other algorithms' comparison
for j in range(numalg):
alg = alglist[j]
if alg == "svm":
tuned_parameters = [
{"kernel": ["rbf"], "gamma": [1e-3, 1e-4], "C": [1, 10, 100, 1000]},
{"kernel": ["linear"], "C": [1, 10, 100, 1000]},
]
clf = GridSearchCV(SVC(), tuned_parameters)
# clf = SVC(probability=True,kernel='linear')
if alg == "RF":
clf = RandomForestClassifier(
n_estimators=400, random_state=10, max_depth=3
)
if alg == "plsda":
clf = PLSRegression(n_components=4, scale=False)
# build the model
startTime = time.perf_counter()
# clf = OneVsRestClassifier(clf)
model = clf.fit(Xtrain, Ytrain.ravel())
# model = clf.fit(X,Ytr)
# if (alg == 'svm'):
# print(clf.best_params_)
endTime = time.perf_counter()
timeElapsedMatrix[i][j] = endTime - startTime
if k > 2:
Ypred_test = np.around(
model.predict(Xtest)
).ravel() # getPredLabel(model.predict(Xtest))
Ypred_train = np.around(
model.predict(Xtrain)
).ravel() # getPredLabel(model.predict(Xtrain))
else:
Ypred_test = getPredLabel(model.predict(Xtest))
Ypred_train = getPredLabel(model.predict(Xtrain))
accuracy_test_comp[i][j] = accuracy_score(Ypred_test.astype("int64"), Ytest)
accuracy_train_comp[i][j] = accuracy_score(
Ypred_train.astype("int64"), Ytrain
)
# print("sil = ", metrics.silhouette_score(model.x_scores_, Ypred_train) )
if alg == "plsda":
sil_train[i] = metrics.silhouette_score(model.x_scores_, Ypred_train)
if (
np.unique(Ytest).shape[0] == 2
and np.unique(Ypred_test.astype("int64")).shape[0] == 2
):
AUC_test_comp[i][j * 4] = roc_auc_score(
Ypred_test.astype("int64"), Ytest
)
AUC_train_comp[i][j * 4] = roc_auc_score(
Ypred_train.astype("int64"), Ytrain
)
# F1 precision recal
AUC_train_comp[i][
j * 4 + 1 : j * 4 + 4
] = metrics.precision_recall_fscore_support(
Ytrain, Ypred_train.astype("int64"), average="macro"
)[
:-1
]
AUC_test_comp[i][
j * 4 + 1 : j * 4 + 4
] = metrics.precision_recall_fscore_support(
Ytest, Ypred_test.astype("int64"), average="macro"
)[
:-1
]
# end kfold loop
nbm = int(nbG.mean())
accG = np.mean(accuracy_test[:, 0], axis=0)
Meanclass = meanclassi.mean()
W_mean = np.mean(W0, axis=2)
mu_mean = np.mean(mu0, axis=2)
# Z_mean= np.mean(Z0,axis=2)
normfro = np.linalg.norm(w, "fro")
# Class size
Ctab = []
size_class = np.zeros(k) # Size of each class (real)
size_class_est = np.zeros(k) # Size of each class (estimated)
for j in range(k):
size_class[j] = (YR == (j + 1)).sum()
size_class_est[j] = (Y_PDS == (j + 1)).sum()
Ctab.append("Class {}".format(j + 1))
df_szclass = pd.DataFrame(size_class, index=Ctab, columns=["Class Size"])
df_szclass_est = pd.DataFrame(size_class_est, index=Ctab, columns=["Class Size"])
# Data accuracy
accuracy_train = np.vstack((accuracy_train, np.mean(accuracy_train, axis=0)))
accuracy_test = np.vstack((accuracy_test, np.mean(accuracy_test, axis=0)))
# auc_train = np.vstack((auc_train,np.mean(auc_train,axis=0)))
# auc_test = np.vstack((auc_test,np.mean(auc_test,axis=0)))
ind_df = []
for i_fold in range(nfold):
ind_df.append("Fold {}".format(i_fold + 1))
ind_df.append("Mean")
columns = ["Global"]
if clusternames is None:
columns += Ctab
else:
columns += clusternames
df_accTrain = pd.DataFrame(accuracy_train, index=ind_df, columns=columns)
df_acctest = pd.DataFrame(accuracy_test, index=ind_df, columns=columns)
# Data accuracy1
ind_df_comp = []
for i_fold in range(nfold):
ind_df_comp.append("Fold {}".format(i_fold + 1))
df_comp = pd.DataFrame(accuracy_test_comp, index=ind_df_comp, columns=alglist)
df_comp.loc["Mean"] = df_comp.mean()
df_comp["pd"] = df_acctest["Global"]
colauc = []
for met in alglist:
colauc.append(met + " AUC")
colauc.append(met + " Precision")
colauc.append(met + " Recall")
colauc.append(met + " F1 score")
df_compauc = pd.DataFrame(AUC_test_comp, index=ind_df_comp, columns=colauc)
df_compauc["pd"] = auc_test
df_compauc["sil_plsda"] = sil_train
df_compauc.loc["Mean"] = df_compauc.mean()
alglen = len(alglist)
alglist1 = []
for i in range(alglen):
alglist1.append(alglist[i])
alglist1.append("pd")
df_timeElapsed = pd.DataFrame(
timeElapsedMatrix, index=ind_df_comp, columns=alglist1
)
df_timeElapsed.loc["Mean"] = df_timeElapsed.mean()
# Feature selection
print("Selecting features from whole dataset...", end="")
w, mu, nbGenes, loss = func_algo(X, YR, k, param)[0:4]
topGenes, normW = select_feature_w(w, genenames)
topGenes_mean, normW_mean = select_feature_w(W_mean, genenames)
# Mean of each fold
df_topGenes_mean = pd.DataFrame(topGenes_mean, columns=clusternames)
df_normW_mean = pd.DataFrame(normW_mean, columns=clusternames)
df_topG_normW_mean = merge_topGene_norm(topGenes_mean, normW_mean, clusternames)
# All data
df_topGenes = pd.DataFrame(topGenes, columns=clusternames)
df_normW = | pd.DataFrame(normW, columns=clusternames) | pandas.DataFrame |
# Copyright 2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.
import numpy as np
import pandas as pd
ID_COLS = ['CountryName',
'RegionName',
'Date']
NPI_COLUMNS = ['C1_School closing',
'C2_Workplace closing',
'C3_Cancel public events',
'C4_Restrictions on gatherings',
'C5_Close public transport',
'C6_Stay at home requirements',
'C7_Restrictions on internal movement',
'C8_International travel controls',
'H1_Public information campaigns',
'H2_Testing policy',
'H3_Contact tracing']
# From https://github.com/OxCGRT/covid-policy-tracker/blob/master/documentation/codebook.md
MAX_NPIS = [3, 3, 2, 4, 2, 3, 2, 4, 2, 3, 2] # Sum is 30
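# Illustrative pairing of each NPI column with its maximum stringency value,
# as used by the "MAX" scenario below. The variable name is ours, not OxCGRT's.
NPI_MAX_VALUES = dict(zip(NPI_COLUMNS, MAX_NPIS))
# e.g. NPI_MAX_VALUES['C1_School closing'] == 3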
def generate_scenario(start_date_str, end_date_str, raw_df, countries=None, scenario="Freeze"):
"""
Generates a scenario: a list of intervention plans, with history since 1/1/2020.
By default returns historical data.
Args:
start_date_str: start_date from which to apply the scenario
end_date_str: end_date of the data
raw_df: the original data frame containing the raw data
countries: a list of CountryName, or None for all countries
scenario:
- "Freeze" to keep the last known IP for every future date
- "MIN" to set all future IP to 0 (i.e. plan is to take no measures)
- "MAX" to set all future IP to maximum values (i.e. plan is to do everything possible)
- an array of integers of NPI_COLUMNS lengths: uses this array as the IP to use.
Returns: a Pandas DataFrame
"""
start_date = | pd.to_datetime(start_date_str, format='%Y-%m-%d') | pandas.to_datetime |
# Parameters
XGB_WEIGHT = 0.6200
BASELINE_WEIGHT = 0.0200
OLS_WEIGHT = 0.0700
NN_WEIGHT = 0.0600
XGB1_WEIGHT = 0.8000 # Weight of first in combination of two XGB models
BASELINE_PRED = 0.0115 # Baseline based on mean of training data, per Oleg
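# Rough bookkeeping of the ensemble weights above (illustrative only): the four
# named weights sum to 0.77, which leaves roughly 0.23 of the final blend for
# the LightGBM model, although the script below may renormalise differently.
_NAMED_WEIGHT_SUM = XGB_WEIGHT + BASELINE_WEIGHT + OLS_WEIGHT + NN_WEIGHT  # 0.77
_IMPLIED_LGB_WEIGHT = 1.0 - _NAMED_WEIGHT_SUM  # ~0.23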
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
import gc
from sklearn.linear_model import LinearRegression
import random
import datetime as dt
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout, BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.optimizers import Adam
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Imputer
##### READ IN RAW DATA
print( "\nReading data from disk ...")
prop = pd.read_csv('../input/properties_2016.csv')
train = pd.read_csv("../input/train_2016_v2.csv")
################
################
## LightGBM ##
################
################
# This section is (I think) originally derived from SIDHARTH's script:
# https://www.kaggle.com/sidharthkumar/trying-lightgbm
# which was forked and tuned by <NAME>:
# https://www.kaggle.com/yuqingxue/lightgbm-85-97
# and updated by me (<NAME>):
# https://www.kaggle.com/aharless/lightgbm-with-outliers-remaining
# and a lot of additional changes have happened since then
##### PROCESS DATA FOR LIGHTGBM
print( "\nProcessing data for LightGBM ..." )
for c, dtype in zip(prop.columns, prop.dtypes):
if dtype == np.float64:
prop[c] = prop[c].astype(np.float32)
df_train = train.merge(prop, how='left', on='parcelid')
df_train.fillna(df_train.median(),inplace = True)
x_train = df_train.drop(['parcelid', 'logerror', 'transactiondate', 'propertyzoningdesc',
'propertycountylandusecode', 'fireplacecnt', 'fireplaceflag'], axis=1)
#x_train['Ratio_1'] = x_train['taxvaluedollarcnt']/x_train['taxamount']
y_train = df_train['logerror'].values
print(x_train.shape, y_train.shape)
train_columns = x_train.columns
for c in x_train.dtypes[x_train.dtypes == object].index.values:
x_train[c] = (x_train[c] == True)
del df_train; gc.collect()
x_train = x_train.values.astype(np.float32, copy=False)
d_train = lgb.Dataset(x_train, label=y_train)
##### RUN LIGHTGBM
params = {}
params['max_bin'] = 10
params['learning_rate'] = 0.0021 # shrinkage_rate
params['boosting_type'] = 'gbdt'
params['objective'] = 'regression'
params['metric'] = 'l1' # or 'mae'
params['sub_feature'] = 0.345 # feature_fraction (small values => use very different submodels)
params['bagging_fraction'] = 0.85 # sub_row
params['bagging_freq'] = 40
params['num_leaves'] = 512 # num_leaf
params['min_data'] = 500 # min_data_in_leaf
params['min_hessian'] = 0.05 # min_sum_hessian_in_leaf
params['verbose'] = 0
params['feature_fraction_seed'] = 2
params['bagging_seed'] = 3
np.random.seed(0)
random.seed(0)
print("\nFitting LightGBM model ...")
clf = lgb.train(params, d_train, 430)
del d_train; gc.collect()
del x_train; gc.collect()
print("\nPrepare for LightGBM prediction ...")
print(" Read sample file ...")
sample = pd.read_csv('../input/sample_submission.csv')
print(" ...")
sample['parcelid'] = sample['ParcelId']
print(" Merge with property data ...")
df_test = sample.merge(prop, on='parcelid', how='left')
print(" ...")
del sample, prop; gc.collect()
print(" ...")
#df_test['Ratio_1'] = df_test['taxvaluedollarcnt']/df_test['taxamount']
x_test = df_test[train_columns]
print(" ...")
del df_test; gc.collect()
print(" Preparing x_test...")
for c in x_test.dtypes[x_test.dtypes == object].index.values:
x_test[c] = (x_test[c] == True)
print(" ...")
x_test = x_test.values.astype(np.float32, copy=False)
print("\nStart LightGBM prediction ...")
p_test = clf.predict(x_test)
del x_test; gc.collect()
print( "\nUnadjusted LightGBM predictions:" )
print( pd.DataFrame(p_test).head() )
################
################
## XGBoost ##
################
################
# This section is (I think) originally derived from Infinite Wing's script:
# https://www.kaggle.com/infinitewing/xgboost-without-outliers-lb-0-06463
# inspired by this thread:
# https://www.kaggle.com/c/zillow-prize-1/discussion/33710
# but the code has gone through a lot of changes since then
##### RE-READ PROPERTIES FILE
##### (I tried keeping a copy, but the program crashed.)
print( "\nRe-reading properties file ...")
properties = pd.read_csv('../input/properties_2016.csv')
##### PROCESS DATA FOR XGBOOST
print( "\nProcessing data for XGBoost ...")
for c in properties.columns:
properties[c]=properties[c].fillna(-1)
if properties[c].dtype == 'object':
lbl = LabelEncoder()
lbl.fit(list(properties[c].values))
properties[c] = lbl.transform(list(properties[c].values))
train_df = train.merge(properties, how='left', on='parcelid')
x_train = train_df.drop(['parcelid', 'logerror','transactiondate'], axis=1)
x_test = properties.drop(['parcelid'], axis=1)
# shape
print('Shape train: {}\nShape test: {}'.format(x_train.shape, x_test.shape))
# drop out ouliers
train_df=train_df[ train_df.logerror > -0.4 ]
train_df=train_df[ train_df.logerror < 0.419 ]
x_train=train_df.drop(['parcelid', 'logerror','transactiondate'], axis=1)
y_train = train_df["logerror"].values.astype(np.float32)
y_mean = np.mean(y_train)
print('After removing outliers:')
print('Shape train: {}\nShape test: {}'.format(x_train.shape, x_test.shape))
##### RUN XGBOOST
print("\nSetting up data for XGBoost ...")
# xgboost params
xgb_params = {
'eta': 0.037,
'max_depth': 5,
'subsample': 0.80,
'objective': 'reg:linear',
'eval_metric': 'mae',
'lambda': 0.8,
'alpha': 0.4,
'base_score': y_mean,
'silent': 1
}
dtrain = xgb.DMatrix(x_train, y_train)
dtest = xgb.DMatrix(x_test)
num_boost_rounds = 250
print("num_boost_rounds="+str(num_boost_rounds))
# train model
print( "\nTraining XGBoost ...")
model = xgb.train(dict(xgb_params, silent=1), dtrain, num_boost_round=num_boost_rounds)
print( "\nPredicting with XGBoost ...")
xgb_pred1 = model.predict(dtest)
print( "\nFirst XGBoost predictions:" )
print( pd.DataFrame(xgb_pred1).head() )
##### RUN XGBOOST AGAIN
print("\nSetting up data for XGBoost ...")
# xgboost params
xgb_params = {
'eta': 0.033,
'max_depth': 6,
'subsample': 0.80,
'objective': 'reg:linear',
'eval_metric': 'mae',
'base_score': y_mean,
'silent': 1
}
num_boost_rounds = 150
print("num_boost_rounds="+str(num_boost_rounds))
print( "\nTraining XGBoost again ...")
model = xgb.train(dict(xgb_params, silent=1), dtrain, num_boost_round=num_boost_rounds)
print( "\nPredicting with XGBoost again ...")
xgb_pred2 = model.predict(dtest)
print( "\nSecond XGBoost predictions:" )
print( pd.DataFrame(xgb_pred2).head() )
##### COMBINE XGBOOST RESULTS
xgb_pred = XGB1_WEIGHT*xgb_pred1 + (1-XGB1_WEIGHT)*xgb_pred2
#xgb_pred = xgb_pred1
print( "\nCombined XGBoost predictions:" )
print( pd.DataFrame(xgb_pred).head() )
del train_df
del x_train
del x_test
del properties
del dtest
del dtrain
del xgb_pred1
del xgb_pred2
gc.collect()
######################
######################
## Neural Network ##
######################
######################
# Neural network copied from this script:
# https://www.kaggle.com/aharless/keras-neural-network-lb-06492 (version 20)
# which was built on the skeleton in this notebook:
# https://www.kaggle.com/prasunmishra/ann-using-keras
# Read in data for neural network
print( "\n\nProcessing data for Neural Network ...")
print('\nLoading train, prop and sample data...')
train = pd.read_csv("../input/train_2016_v2.csv", parse_dates=["transactiondate"])
prop = pd.read_csv('../input/properties_2016.csv')
sample = | pd.read_csv('../input/sample_submission.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
from pandas.compat import range
import pandas.util.testing as tm
from pandas import read_csv
import os
import nose
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
import pandas.tools.rplot as rplot
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def between(a, b, x):
"""Check if x is in the somewhere between a and b.
Parameters:
-----------
a: float, interval start
b: float, interval end
x: float, value to test for
Returns:
--------
True if x is between a and b, False otherwise
"""
if a < b:
return x >= a and x <= b
else:
return x <= a and x >= b
@tm.mplskip
class TestUtilityFunctions(tm.TestCase):
"""
Tests for RPlot utility functions.
"""
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
def test_make_aes1(self):
aes = rplot.make_aes()
self.assertTrue(aes['x'] is None)
self.assertTrue(aes['y'] is None)
self.assertTrue(aes['size'] is None)
self.assertTrue(aes['colour'] is None)
self.assertTrue(aes['shape'] is None)
self.assertTrue(aes['alpha'] is None)
self.assertTrue(isinstance(aes, dict))
def test_make_aes2(self):
self.assertRaises(ValueError, rplot.make_aes,
size=rplot.ScaleShape('test'))
self.assertRaises(ValueError, rplot.make_aes,
colour=rplot.ScaleShape('test'))
self.assertRaises(ValueError, rplot.make_aes,
shape=rplot.ScaleSize('test'))
self.assertRaises(ValueError, rplot.make_aes,
alpha=rplot.ScaleShape('test'))
def test_dictionary_union(self):
dict1 = {1 : 1, 2 : 2, 3 : 3}
dict2 = {1 : 1, 2 : 2, 4 : 4}
union = rplot.dictionary_union(dict1, dict2)
self.assertEqual(len(union), 4)
keys = list(union.keys())
self.assertTrue(1 in keys)
self.assertTrue(2 in keys)
self.assertTrue(3 in keys)
self.assertTrue(4 in keys)
self.assertEqual(rplot.dictionary_union(dict1, {}), dict1)
self.assertEqual(rplot.dictionary_union({}, dict1), dict1)
self.assertEqual(rplot.dictionary_union({}, {}), {})
def test_merge_aes(self):
layer1 = rplot.Layer(size=rplot.ScaleSize('test'))
layer2 = rplot.Layer(shape=rplot.ScaleShape('test'))
rplot.merge_aes(layer1, layer2)
self.assertTrue(isinstance(layer2.aes['size'], rplot.ScaleSize))
self.assertTrue(isinstance(layer2.aes['shape'], rplot.ScaleShape))
self.assertEqual(layer2.aes['size'], layer1.aes['size'])
for key in layer2.aes.keys():
if key != 'size' and key != 'shape':
self.assertTrue(layer2.aes[key] is None)
def test_sequence_layers(self):
layer1 = rplot.Layer(self.data)
layer2 = rplot.GeomPoint(x='SepalLength', y='SepalWidth',
size=rplot.ScaleSize('PetalLength'))
layer3 = rplot.GeomPolyFit(2)
result = rplot.sequence_layers([layer1, layer2, layer3])
self.assertEqual(len(result), 3)
last = result[-1]
self.assertEqual(last.aes['x'], 'SepalLength')
self.assertEqual(last.aes['y'], 'SepalWidth')
self.assertTrue(isinstance(last.aes['size'], rplot.ScaleSize))
self.assertTrue(self.data is last.data)
self.assertTrue(rplot.sequence_layers([layer1])[0] is layer1)
@tm.mplskip
class TestTrellis(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/tips.csv')
self.data = read_csv(path, sep=',')
layer1 = rplot.Layer(self.data)
layer2 = rplot.GeomPoint(x='total_bill', y='tip')
layer3 = | rplot.GeomPolyFit(2) | pandas.tools.rplot.GeomPolyFit |
import pandas as pd
from abb_deeplearning.abb_data_pipeline import abb_clouddrl_read_pipeline as abb_rp
from abb_deeplearning.abb_data_pipeline import abb_clouddrl_constants as abb_c
import datetime as dt
import os
df_master = pd.read_hdf('/media/data/Daten/data_C_int/master_index_cav.h5')
file_filter={"_sp_256", ".jpeg"}
df_data_files=[]
df_label_files=[]
#(dt.datetime.strptime('2015-09-28', '%Y-%m-%d'),dt.datetime.strptime('2015-09-30', '%Y-%m-%d'))
for day in abb_rp.read_cld_img_time_range_paths(img_d_tup_l=None,automatic_daytime=True,
file_filter=file_filter, get_sp_data=True,get_cs_data=True,get_mpc_data=True, randomize_days=False):
#Img
image_keys = list(day[0].keys())
img_data = list(day[0].values())
folders = [p.split('/')[5] for p in img_data]
names = [p.split('/')[6] for p in img_data]
img_df = pd.DataFrame(data={'folder':folders,'name':names},index=image_keys)
#IRR
irr = pd.read_csv(day[1] , index_col=0, parse_dates=True,
header=None)
irr_data = irr.loc[pd.to_datetime(image_keys)]
irr_data.columns = [['irradiation_hs']]
#MPC100
mpc= pd.read_csv(day[2].rsplit('.',1)[0]+"100.csv", index_col=0, parse_dates=True,
header=None)
mpc_data = mpc.loc[pd.to_datetime(image_keys)]
mpc_data.columns = [['mpc100']]
mpc40 = pd.read_csv(day[2].rsplit('.', 1)[0] + "40.csv", index_col=0, parse_dates=True,
header=None)
mpc_data_40 = mpc40.loc[pd.to_datetime(image_keys)]
mpc_data_40.columns = [['mpc40']]
#Naive100
naive = pd.read_csv(day[2].rsplit('-', 1)[0] + "-naive100.csv", index_col=0, parse_dates=True,
header=None)
naive_data = naive.loc[pd.to_datetime(image_keys)]
naive_data.columns = [['naive100']]
naive_new = pd.read_csv(day[2].rsplit('-', 1)[0] + "-naive100_new.csv", index_col=0, parse_dates=True,
header=None)
naive_data_new = naive_new.loc[pd.to_datetime(image_keys)]
naive_data_new.columns = [['naive100_new']]
naive_new_40 = pd.read_csv(day[2].rsplit('-', 1)[0] + "-naive40_new.csv", index_col=0, parse_dates=True,
header=None)
naive_data_new_40 = naive_new_40.loc[pd.to_datetime(image_keys)]
naive_data_new_40.columns = [['naive40_new']]
df_master = df_master[~df_master.index.duplicated(keep='last')]
df_temp = df_master.loc[pd.to_datetime(image_keys)]['T1']
print(len(df_temp),len(image_keys))
df_t1 = pd.DataFrame(data={'T1':df_temp.values},index=image_keys)
# Clearsky
cs= pd.read_csv(day[3], index_col=0, parse_dates=True,
header=None) # read sp file data with sunspot coordinates
cs_data = cs.loc[pd.to_datetime(image_keys)]
cs_data.columns = [['ghi']]
#Sunspot coords
sunspot_data = pd.read_csv(day[4], index_col=0, parse_dates=True,
header=None) # read sp file data with sunspot coordinates
sunspot_coords = sunspot_data.loc[ | pd.to_datetime(image_keys) | pandas.to_datetime |
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Index,
Interval,
IntervalIndex,
Timedelta,
Timestamp,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
@pytest.fixture(
params=[
( | Index([0, 2, 4]) | pandas.Index |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
csv.register_dialect('mydialect', delimiter=':')
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
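# func receives one array of raw strings per source column;
# _concat_date_cols joins them element-wise (e.g. '19990127' + ' 19:00:00')
# before try_parse_dates converts the combined strings to datetimes.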
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
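# conv.parse_date_time pairs each date value ('19990127') with the matching
# time column before parsing, so both 'nominal' and 'actual' come back as
# datetime columns.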
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format we can't match the string '-999.0'
# exactly, so we need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
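# f places the i-th sentinel in column i and leaves every other field empty,
# e.g. f(1, 'N/A') -> ',N/A,,...,' so the resulting frame should be all-NaN.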
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# header has too few columns for the second data row
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')],
[u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')],
[u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')],
[u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2], [0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep=r'\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows=4 drops the first four lines (the three comments plus the
# 'X,Y,Z' line); header=1 then takes the second remaining line
# ('A,B,C') as the header
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat artificial, since on PY3 the parser is handed decoded
# text and never sees the raw bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
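# Example conversions (sketch): convert_score('2-5') -> 3.5 (midpoint of the
# range), convert_days('14+') -> 15, and both return np.nan for empty fields.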
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float64)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object_)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = | pd.DataFrame([], columns=['foo', 'bar']) | pandas.DataFrame |
import argparse
import datetime
import os
import pandas as pd
import tasking_manager_stats.data_management as dm
def get_args():
parser = argparse.ArgumentParser(description='Agregate users data from tasking manager API')
parser.add_argument('merged_stats', type=str, help='Path of the merged stats CSV file')
parser.add_argument('stats_one_author', type=str, help='Path of the merged stats 1 author by task type CSV file')
parser.add_argument('mapathon', type=str, help='Path of the mapathon CSV file')
parser.add_argument('-max_date', type=str, help='Date (%%Y_%%m_%%d) at which to stop the data and check whether contributors come back afterwards')
return parser.parse_args()
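# Example invocation (illustrative only -- the script name, CSV paths and date
# below are placeholders, not files shipped with the project):
#
#   python aggregate_user_stats.py merged_stats.csv merged_stats_one_author.csv \
#       mapathons.csv -max_date 2020_06_01
#
# argparse then exposes the inputs as args.merged_stats, args.stats_one_author,
# args.mapathon and args.max_date for the aggregation steps below.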
def compute_mapathon_number(mapathon_file, stats_file, max_date=None):
mapathons = pd.read_csv(mapathon_file)
mapathons['Date'] = pd.to_datetime(mapathons['Date'], dayfirst=True)
# Extract projects of the mapathon
mapathons2 = pd.DataFrame()
for _, row in mapathons.iterrows():
tasks = row['Tasks']
if pd.isnull(tasks):
continue
projects = set()
for s in tasks.split('/'):
try:
projects.add(int(s))
except:
pass
for s in tasks.split(', '):
try:
projects.add(int(s))
except:
pass
# Create new mapathon line for each project
for project in projects:
mapathons2 = pd.concat([mapathons2, pd.DataFrame(data=[(row['Date'], row['City'], project)],
columns=['date', 'City', 'Project'])], axis=0,
ignore_index=True)
# Compute number of mapathons by contributor
df = pd.read_csv(stats_file, encoding='ISO-8859-1')
df['date'] = df['Year'].astype(str) + '-' + df['Month'].astype(str) + '-' + df['Day'].astype(str)
df['date'] = pd.to_datetime(df['date'], yearfirst=True)
if max_date is not None:
df = df[df['date'] <= max_date]
df2 = df[(df['Hour'] > 17) & (df['Hour'] < 22)]
df3 = pd.merge(df2.loc[df2['Type'] == 'MAPPING'], mapathons2, on=['date', 'Project'])
df4 = df3[['date', 'Author', 'City', 'Project']].drop_duplicates()
res = df4[['Author', 'date']].drop_duplicates().groupby('Author').count().date.reset_index()
res.columns = ['Author', 'MapathonNb']
return res
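# Minimal usage sketch for compute_mapathon_number (the file names are hypothetical):
#
#   counts = compute_mapathon_number('mapathons.csv', 'merged_stats.csv',
#                                    max_date=pd.Timestamp('2020-06-01'))
#
# The result has one row per contributor with columns ['Author', 'MapathonNb'],
# counting only mapping sessions between 18:00 and 21:59 on a mapathon project/date.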
def compute_validation_time_by_task(stats_dir, csv_file, max_date=None):
df_project = pd.read_csv(os.path.join(stats_dir, csv_file), encoding='ISO-8859-1')
df_project = df_project[df_project['Type'] == 'VALIDATION']
if len(df_project) == 0:
return pd.DataFrame()
if max_date is not None:
df_project['date'] = df_project['Year'].astype(str) + '-' + df_project['Month'].astype(str) + '-' + df_project['Day'].astype(str)
df_project['date'] = pd.to_datetime(df_project['date'], yearfirst=True)
df_project = df_project[df_project['date'] <= max_date]
if len(df_project) == 0:
return pd.DataFrame()
key = ['Year', 'Month', 'Day', 'Rel. Day', 'Hour', 'Minute', 'Second', 'Duration', 'Author', 'Type']
df_project2 = df_project.groupby(key).count().Task
df_project2 = df_project2.reset_index()
df_project3 = pd.merge(df_project, df_project2, on=key)
df_project3['Duration'] /= df_project3['Task_y']
df_project3 = df_project3[['Project', 'Task_x', 'Duration']]
df_project3.columns = ['Project', 'Task', 'Duration']
df_project3 = df_project3.groupby(['Project', 'Task']).sum().reset_index()
return df_project3
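# Worked example of the duration split above: a single 60-second validation entry
# whose (author, timestamp, duration) key covers 3 tasks contributes 60/3 = 20
# seconds to each of those tasks; the final groupby then sums these shares per
# (Project, Task) pair.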
def compute_validation_time_by_task_all_projects(stats_dir, max_date=None):
df = pd.DataFrame()
for file in os.listdir(stats_dir):
if file.endswith('.csv'):
df = pd.concat([df, compute_validation_time_by_task(stats_dir, file, max_date)], axis=0, ignore_index=True)
return df
def compute_advanced_stats(stats_dir, df_one_author, df, max_date=None):
validation_df = compute_validation_time_by_task_all_projects(stats_dir, max_date)
validation_df.columns = ['Project', 'Task', 'ValidationDuration']
if max_date is not None:
df_one_author['date'] = df_one_author['Year'].astype(int).astype(str) + '-' + df_one_author['Month'].astype(int).astype(str) + '-' + df_one_author['Day'].astype(int).astype(str)
df_one_author['date'] = pd.to_datetime(df_one_author['date'], yearfirst=True)
df_one_author = df_one_author[df_one_author['date'] <= max_date]
del df_one_author['date']
df = df[df['date'] <= max_date]
df_mapping = pd.merge(validation_df, df_one_author[df_one_author['Type'] == 'MAPPING'], on=['Project', 'Task'])
df_mapping_valid = df_mapping.groupby('Author').sum().ValidationDuration.reset_index()
df_pure_mapping = df[df['Type'] == 'MAPPING'].groupby('Author').sum().Duration.reset_index()
df_pure_mapping.columns = ['Author', 'OwnMappingDuration']
res = pd.merge(df_pure_mapping, df_mapping_valid, on='Author', how='left')
df_pure_validation = df[df['Type'] == 'VALIDATION'].groupby('Author').sum().Duration.reset_index()
df_pure_validation.columns = ['Author', 'OwnValidationDuration']
res = pd.merge(res, df_pure_validation, on='Author', how='left')
res['ValidationOnOwnMappingDuration'] = res['ValidationDuration'] / res['OwnMappingDuration']
return res
def aggregate_merged_stats(merged_stats, max_date=None):
if max_date is not None:
merged_stats = merged_stats[merged_stats['date'] <= max_date]
df = merged_stats[['date', 'Author']].drop_duplicates()
df = df.groupby('Author').count().date.reset_index()
df.columns = ['Author', 'ContribDayNb']
df2 = merged_stats.groupby('Author').min().date.reset_index()
df2.columns = ['Author', 'FirstContrib']
df = pd.merge(df, df2, on='Author')
df3 = merged_stats.groupby('Author').max().date.reset_index()
df3.columns = ['Author', 'LatestContrib']
df = pd.merge(df, df3, on='Author')
df4 = merged_stats[['Project', 'Author']].drop_duplicates()
df4 = df4.groupby('Author').count().Project.reset_index()
df4.columns = ['Author', 'ProjectNb']
df = pd.merge(df, df4, on='Author')
return df
def aggregate_merged_stats_one_author_by_task_type(merged_stats_one_author_by_task_type, max_date=None):
if max_date is not None:
merged_stats_one_author_by_task_type['date'] = merged_stats_one_author_by_task_type['Year'].astype(int).astype(str) + '-' + merged_stats_one_author_by_task_type['Month'].astype(int).astype(str) + '-' + merged_stats_one_author_by_task_type['Day'].astype(int).astype(str)
merged_stats_one_author_by_task_type['date'] = pd.to_datetime(merged_stats_one_author_by_task_type['date'], yearfirst=True)
merged_stats_one_author_by_task_type = merged_stats_one_author_by_task_type[merged_stats_one_author_by_task_type['date'] <= max_date]
del merged_stats_one_author_by_task_type['date']
df = merged_stats_one_author_by_task_type[merged_stats_one_author_by_task_type['Type'] == 'MAPPING']
df = df.groupby('Author').count().Type.reset_index()
df.columns = ['Author', 'MappingTaskNb']
df2 = merged_stats_one_author_by_task_type[merged_stats_one_author_by_task_type['Type'] == 'VALIDATION']
df2 = df2.groupby('Author').count().Type.reset_index()
df2.columns = ['Author', 'ValidationTaskNb']
df = pd.merge(df, df2, on='Author', how='left')
return df
if __name__ == '__main__':
args = get_args()
max_date = datetime.datetime.strptime(args.max_date, '%Y_%m_%d') if args.max_date is not None else None
merged_stats = pd.read_csv(args.merged_stats, encoding='ISO-8859-1')
merged_stats['date'] = merged_stats['Year'].astype(str) + '-' + merged_stats['Month'].astype(str) + '-' + merged_stats['Day'].astype(str)
merged_stats['date'] = pd.to_datetime(merged_stats['date'], yearfirst=True)
user_stats = aggregate_merged_stats(merged_stats, max_date)
merged_stats_one_author_by_task_type = pd.read_csv(args.stats_one_author, encoding='ISO-8859-1')
user_stats2 = aggregate_merged_stats_one_author_by_task_type(merged_stats_one_author_by_task_type, max_date)
user_stats = pd.merge(user_stats, user_stats2, on='Author')
mapathon_stats = compute_mapathon_number(args.mapathon, args.merged_stats, max_date)
user_stats = pd.merge(user_stats, mapathon_stats, on='Author', how='left')
adv_stats = compute_advanced_stats(os.path.join(dm.get_data_dir(), 'stats'), merged_stats_one_author_by_task_type,
merged_stats, max_date)
user_stats = pd.merge(user_stats, adv_stats, on='Author', how='left')
filename = 'agregated_user_stats.csv'
if max_date is not None:
come_back = merged_stats[merged_stats['date'] > max_date]['Author'].unique()
come_back_df = pd.DataFrame(come_back, columns=['Author'])
come_back_df['ComeBack'] = 1
user_stats = | pd.merge(user_stats, come_back_df, on='Author', how='left') | pandas.merge |
"""CoinGecko model"""
__docformat__ = "numpy"
import logging
from typing import Any, Dict, List, Optional, Tuple, Union
import pandas as pd
import regex as re
from pycoingecko import CoinGeckoAPI
from gamestonk_terminal.cryptocurrency.dataframe_helpers import (
lambda_replace_underscores_in_column_names,
)
from gamestonk_terminal.cryptocurrency.discovery.pycoingecko_model import read_file_data
from gamestonk_terminal.cryptocurrency.pycoingecko_helpers import (
DENOMINATION,
calc_change,
create_dictionary_with_prefixes,
filter_list,
find_discord,
remove_keys,
rename_columns_in_dct,
)
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.rich_config import console
logger = logging.getLogger(__name__)
CHANNELS = {
"telegram_channel_identifier": "telegram",
"twitter_screen_name": "twitter",
"subreddit_url": "subreddit",
"bitcointalk_thread_identifier": "bitcointalk",
"facebook_username": "facebook",
"discord": "discord",
}
BASE_INFO = [
"id",
"name",
"symbol",
"asset_platform_id",
"description",
"contract_address",
"market_cap_rank",
"public_interest_score",
]
@log_start_end(log=logger)
def get_coin_potential_returns(
main_coin: str,
vs: Union[str, None] = None,
top: Union[int, None] = None,
price: Union[int, None] = None,
) -> pd.DataFrame:
"""Fetch data to calculate potential returns of a certain coin. [Source: CoinGecko]
Parameters
----------
main_coin : str
Coin loaded to check potential returns for (e.g., algorand)
vs : str | None
Coin to compare main_coin with (e.g., bitcoin)
top : int | None
Number of coins with highest market cap to compare main_coin with (e.g., 5)
price : int | None
Target price of main_coin to check potential returns (e.g., 5)
Returns
-------
pd.DataFrame
Potential returns data
Columns: Coin, Current Price, Target Coin, Potential Price, Potential Market Cap ($), Change (%)
"""
client = CoinGeckoAPI()
COLUMNS = [
"Coin",
"Current Price ($)",
"Current Market Cap ($)",
"Target Coin",
"Potential Price ($)",
"Potential Market Cap ($)",
"Change (%)",
]
if top and top > 0: # user wants to compare with top coins
data = client.get_price(
ids=f"{main_coin}",
vs_currencies="usd",
include_market_cap=True,
include_24hr_vol=False,
include_24hr_change=False,
include_last_updated_at=False,
)
top_coins_data = client.get_coins_markets(
vs_currency="usd", per_page=top, order="market_cap_desc"
)
main_coin_data = data[main_coin]
diff_arr = []
for coin in top_coins_data:
market_cap_difference_percentage = calc_change(
coin["market_cap"], main_coin_data["usd_market_cap"]
)
future_price = main_coin_data["usd"] * (
1 + market_cap_difference_percentage / 100
)
diff_arr.append(
[
main_coin,
main_coin_data["usd"],
main_coin_data["usd_market_cap"],
coin["id"],
future_price,
coin["market_cap"],
market_cap_difference_percentage,
]
)
return pd.DataFrame(
data=diff_arr,
columns=COLUMNS,
)
if vs: # user passed a coin
data = client.get_price(
ids=f"{main_coin},{vs}",
vs_currencies="usd",
include_market_cap=True,
include_24hr_vol=False,
include_24hr_change=False,
include_last_updated_at=False,
)
main_coin_data = data[main_coin]
vs_coin_data = data[vs]
if main_coin_data and vs_coin_data:
market_cap_difference_percentage = calc_change(
vs_coin_data["usd_market_cap"], main_coin_data["usd_market_cap"]
)
future_price = main_coin_data["usd"] * (
1 + market_cap_difference_percentage / 100
)
return pd.DataFrame(
data=[
[
main_coin,
main_coin_data["usd"],
main_coin_data["usd_market_cap"],
vs,
future_price,
vs_coin_data["usd_market_cap"],
market_cap_difference_percentage,
]
],
columns=COLUMNS,
)
if price and price > 0: # user passed a price
data = client.get_price(
ids=main_coin,
vs_currencies="usd",
include_market_cap=True,
include_24hr_vol=False,
include_24hr_change=False,
include_last_updated_at=False,
)
main_coin_data = data[main_coin]
if main_coin_data:
final_market_cap = (
main_coin_data["usd_market_cap"] * price / main_coin_data["usd"]
)
market_cap_difference_percentage = calc_change(
final_market_cap, main_coin_data["usd_market_cap"]
)
future_price = main_coin_data["usd"] * (
1 + market_cap_difference_percentage / 100
)
return pd.DataFrame(
data=[
[
main_coin,
main_coin_data["usd"],
main_coin_data["usd_market_cap"],
"",
future_price,
final_market_cap,
market_cap_difference_percentage,
]
],
columns=COLUMNS,
)
return pd.DataFrame()
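# Usage sketch (illustrative; each call performs live CoinGecko API requests):
#
#   get_coin_potential_returns("algorand", vs="bitcoin")  # compare with one coin
#   get_coin_potential_returns("algorand", top=5)         # compare with top 5 by market cap
#   get_coin_potential_returns("algorand", price=5)       # returns if a $5 price is reached
#
# Each call returns a DataFrame with the COLUMNS defined above; if neither vs,
# top nor price is supplied, an empty DataFrame is returned.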
@log_start_end(log=logger)
def check_coin(coin_id: str):
coins = read_file_data("coingecko_coins.json")
for coin in coins:
if coin["id"] == coin_id:
return coin["id"]
if coin["symbol"] == coin_id:
return coin["id"]
return None
@log_start_end(log=logger)
def get_coin_market_chart(
coin_id: str = "", vs_currency: str = "usd", days: int = 30, **kwargs: Any
) -> pd.DataFrame:
"""Get prices for given coin. [Source: CoinGecko]
Parameters
----------
coin_id: str
coin id on CoinGecko (e.g. "bitcoin")
vs_currency: str
currency vs which to display the data
days: int
number of days of data to display
**kwargs
extra keyword arguments forwarded to CoinGeckoAPI.get_coin_market_chart_by_id
Returns
-------
pandas.DataFrame
Prices for given coin
Columns: time, price, currency
"""
client = CoinGeckoAPI()
prices = client.get_coin_market_chart_by_id(coin_id, vs_currency, days, **kwargs)
prices = prices["prices"]
df = pd.DataFrame(data=prices, columns=["time", "price"])
df["time"] = pd.to_datetime(df.time, unit="ms")
df = df.set_index("time")
df["currency"] = vs_currency
return df
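# Usage sketch (illustrative; performs a live API request):
#
#   prices = get_coin_market_chart("bitcoin", vs_currency="usd", days=7)
#   returns = prices["price"].pct_change().dropna()  # interval-over-interval returns
#
# The returned frame is indexed by timestamp and holds 'price' and 'currency' columns.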
class Coin:
"""Coin class, it holds loaded coin"""
@log_start_end(log=logger)
def __init__(self, symbol: str, load_from_api: bool = False):
self.client = CoinGeckoAPI()
if load_from_api:
self._coin_list = self.client.get_coins_list()
else:
self._coin_list = read_file_data("coingecko_coins.json")
self.coin_symbol, self.symbol = self._validate_coin(symbol)
if self.coin_symbol:
self.coin: Dict[Any, Any] = self._get_coin_info()
@log_start_end(log=logger)
def __str__(self):
return f"{self.coin_symbol}"
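# Usage sketch for the Coin wrapper (illustrative; the constructor and the
# getters below hit the live CoinGecko API):
#
#   coin = Coin("btc")                      # accepts a CoinGecko id or a symbol
#   coin.get_base_info()                    # DataFrame of Metric/Value rows
#   coin.get_market_data()
#   coin.get_all_time_high(currency="usd")
#
# An unknown id/symbol raises ValueError from _validate_coin.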
@log_start_end(log=logger)
def _validate_coin(self, search_coin: str) -> Tuple[Optional[Any], Optional[Any]]:
"""Validate if given coin symbol or id exists in list of available coins on CoinGecko.
If yes it returns coin id. [Source: CoinGecko]
Parameters
----------
search_coin: str
Either coin symbol or coin id
Returns
-------
Tuple[str, str]
- str with coin
- str with symbol
"""
coin = None
symbol = None
for dct in self._coin_list:
if search_coin.lower() in [
dct["id"],
dct["symbol"],
]:
coin = dct.get("id")
symbol = dct.get("symbol")
return coin, symbol
raise ValueError(f"Could not find coin with the given id: {search_coin}\n")
@log_start_end(log=logger)
def coin_list(self) -> list:
"""List all available coins [Source: CoinGecko]
Returns
-------
list
list of all available coin ids
"""
return [token.get("id") for token in self._coin_list]
@log_start_end(log=logger)
def _get_coin_info(self) -> dict:
"""Helper method which fetch the coin information by id from CoinGecko API like:
(name, price, market, ... including exchange tickers) [Source: CoinGecko]
Returns
-------
dict
Coin information
"""
params = dict(localization="false", tickers="false", sparkline=True)
return self.client.get_coin_by_id(self.coin_symbol, **params)
@log_start_end(log=logger)
def _get_links(self) -> Dict:
"""Helper method that extracts links from coin [Source: CoinGecko]
Returns
-------
dict
Links related to coin
"""
return self.coin.get("links", {})
@log_start_end(log=logger)
def get_repositories(self) -> Optional[Any]:
"""Get list of all repositories for given coin [Source: CoinGecko]
Returns
-------
list
Repositories related to coin
"""
return self._get_links().get("repos_url")
@log_start_end(log=logger)
def get_developers_data(self) -> pd.DataFrame:
"""Get coin development data from GitHub or BitBucket like:
number of pull requests, contributor etc [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Developers Data
Columns: Metric, Value
"""
dev = self.coin.get("developer_data", {})
useless_keys = (
"code_additions_deletions_4_weeks",
"last_4_weeks_commit_activity_series",
)
remove_keys(useless_keys, dev)
df = pd.Series(dev).to_frame().reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: lambda_replace_underscores_in_column_names(x)
if isinstance(x, str)
else x
)
return df[df["Value"].notna()]
@log_start_end(log=logger)
def get_blockchain_explorers(self) -> Union[pd.DataFrame, Any]:
"""Get list of URLs to blockchain explorers for given coin. [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Blockchain Explorers
Columns: Metric, Value
"""
blockchain = self._get_links().get("blockchain_site")
if blockchain:
dct = filter_list(blockchain)
df = pd.Series(dct).to_frame().reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: lambda_replace_underscores_in_column_names(x)
if isinstance(x, str)
else x
)
return df[df["Value"].notna()]
return None
@log_start_end(log=logger)
def get_social_media(self) -> pd.DataFrame:
"""Get list of URLs to social media like twitter, facebook, reddit... [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Urls to social media
Columns: Metric, Value
"""
social_dct = {}
links = self._get_links()
for (
channel
) in CHANNELS.keys(): # pylint: disable=consider-iterating-dictionary)
if channel in links:
value = links.get(channel, "")
if channel == "twitter_screen_name":
value = "https://twitter.com/" + value
elif channel == "bitcointalk_thread_identifier" and value is not None:
value = f"https://bitcointalk.org/index.php?topic={value}"
social_dct[channel] = value
social_dct["discord"] = find_discord(links.get("chat_url"))
dct = rename_columns_in_dct(social_dct, CHANNELS)
df = pd.Series(dct).to_frame().reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: lambda_replace_underscores_in_column_names(x)
if isinstance(x, str)
else x
)
return df[df["Value"].notna()]
@log_start_end(log=logger)
def get_websites(self) -> pd.DataFrame:
"""Get list of URLs to websites like homepage of coin, forum. [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Urls to website, homepage, forum
Columns: Metric, Value
"""
websites_dct = {}
links = self._get_links()
sites = ["homepage", "official_forum_url", "announcement_url"]
for site in sites:
websites_dct[site] = filter_list(links.get(site))
df = pd.Series(websites_dct).to_frame().reset_index()
df.columns = ["Metric", "Value"]
df["Value"] = df["Value"].apply(lambda x: ",".join(x))
df["Metric"] = df["Metric"].apply(
lambda x: lambda_replace_underscores_in_column_names(x)
if isinstance(x, str)
else x
)
return df[df["Value"].notna()]
@log_start_end(log=logger)
def get_categories(self) -> Union[Dict[Any, Any], List[Any]]:
"""Coins categories. [Source: CoinGecko]
Returns
-------
list/dict
Coin categories
"""
return self.coin.get("categories", {})
@log_start_end(log=logger)
def _get_base_market_data_info(self) -> dict:
"""Helper method that fetches all the base market/price information about given coin. [Source: CoinGecko]
Returns
-------
dict
All market related information for given coin
"""
market_dct = {}
market_data = self.coin.get("market_data", {})
for stat in [
"total_supply",
"max_supply",
"circulating_supply",
"price_change_percentage_24h",
"price_change_percentage_7d",
"price_change_percentage_30d",
]:
market_dct[stat] = market_data.get(stat)
prices = create_dictionary_with_prefixes(
["current_price"], market_data, DENOMINATION
)
market_dct.update(prices)
return market_dct
@log_start_end(log=logger)
def get_base_info(self) -> pd.DataFrame:
"""Get all the base information about given coin. [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Base information about coin
"""
regx = r'<a href="(.+?)">|</a>'
results = {}
for attr in BASE_INFO:
info_obj = self.coin.get(attr, {})
if attr == "description":
info_obj = info_obj.get("en")
info_obj = re.sub(regx, "", info_obj)
info_obj = re.sub(r"\r\n\r\n", " ", info_obj)
results[attr] = info_obj
results.update(self._get_base_market_data_info())
df = pd.Series(results).to_frame().reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: lambda_replace_underscores_in_column_names(x)
if isinstance(x, str)
else x
)
return df[df["Value"].notna()]
@log_start_end(log=logger)
def get_market_data(self) -> pd.DataFrame:
"""Get all the base market information about given coin. [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Base market information about coin
Metric,Value
"""
market_data = self.coin.get("market_data", {})
market_columns_denominated = [
"market_cap",
"fully_diluted_valuation",
"total_volume",
"high_24h",
"low_24h",
]
denominated_data = create_dictionary_with_prefixes(
market_columns_denominated, market_data, DENOMINATION
)
market_single_columns = [
"market_cap_rank",
"total_supply",
"max_supply",
"circulating_supply",
"price_change_percentage_24h",
"price_change_percentage_7d",
"price_change_percentage_30d",
"price_change_percentage_60d",
"price_change_percentage_1y",
"market_cap_change_24h",
]
single_stats = {}
for col in market_single_columns:
single_stats[col] = market_data.get(col)
single_stats.update(denominated_data)
try:
single_stats["circulating_supply_to_total_supply_ratio"] = (
single_stats["circulating_supply"] / single_stats["total_supply"]
)
except (ZeroDivisionError, TypeError) as e:
logger.exception(str(e))
console.print(e)
df = pd.Series(single_stats).to_frame().reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: lambda_replace_underscores_in_column_names(x)
if isinstance(x, str)
else x
)
return df[df["Value"].notna()]
@log_start_end(log=logger)
def get_all_time_high(self, currency: str = "usd") -> pd.DataFrame:
"""Get all time high data for given coin. [Source: CoinGecko]
Returns
-------
pandas.DataFrame
All time high price data
Metric,Value
"""
market_data = self.coin.get("market_data", {})
if market_data == {}:
return pd.DataFrame()
ath_columns = [
"current_price",
"ath",
"ath_date",
"ath_change_percentage",
]
results = {}
for column in ath_columns:
results[column] = market_data[column].get(currency)
df = pd.Series(results).to_frame().reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: lambda_replace_underscores_in_column_names(x)
if isinstance(x, str)
else x
)
df["Metric"] = df["Metric"].apply(lambda x: x.replace("Ath", "All Time High"))
df["Metric"] = df["Metric"] + f" {currency.upper()}"
return df[df["Value"].notna()]
@log_start_end(log=logger)
def get_all_time_low(self, currency: str = "usd") -> pd.DataFrame:
"""Get all time low data for given coin. [Source: CoinGecko]
Returns
-------
pandas.DataFrame
All time low price data
Metric,Value
"""
market_data = self.coin.get("market_data", {})
if market_data == {}:
return | pd.DataFrame() | pandas.DataFrame |
import unittest
import pandas as pd
from mlservice.model.feature_prep.polynomial import add_polynomial
class TestPolynomialFeature(unittest.TestCase):
def test_polynomial(self):
test_data = {"a": [1, 2, 3]}
test_data_df = | pd.DataFrame(test_data) | pandas.DataFrame |
# coding: utf-8
# # Create figures for manuscript
#
# Generate figures for manuscript
# In[1]:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('load_ext', 'rpy2.ipython')
import rpy2
from rpy2.robjects.packages import importr
import os
import sys
import glob
import pickle
import pandas as pd
import numpy as np
import rpy2.robjects.lib.ggplot2 as ggplot2
from plotnine import (ggplot,
labs,
geom_line,
geom_point,
geom_errorbar,
aes,
ggsave,
theme_bw,
theme,
facet_wrap,
scale_color_manual,
guides,
guide_legend,
element_blank,
element_text,
element_rect,
element_line,
coords)
sys.path.append("../../")
from functions import utils
from sklearn.decomposition import PCA
import warnings
warnings.filterwarnings(action='ignore')
from numpy.random import seed
randomState = 123
seed(randomState)
# In[ ]:
# Read in config variables
config_file = os.path.abspath(os.path.join(os.getcwd(),"../../configs", "config_Human_sample.tsv"))
params = utils.read_config(config_file)
# In[ ]:
# Load parameters
local_dir = params["local_dir"]
dataset_name = params["dataset_name"]
analysis_name = params["analysis_name"]
correction_method = params["correction_method"]
lst_num_experiments = params["lst_num_experiments"]
pca_ind = [0,1,2,-3,-2,-1]
# ## Load data
# In[2]:
# File directories
base_dir = os.path.abspath(
os.path.join(
os.getcwd(), "../.."))
similarity_uncorrected_file = os.path.join(
base_dir,
"results",
"saved_variables",
dataset_name +"_sample_lvl_sim_similarity_uncorrected_"+correction_method+".pickle")
ci_uncorrected_file = os.path.join(
base_dir,
"results",
"saved_variables",
dataset_name +"_sample_lvl_sim_ci_uncorrected_"+correction_method+".pickle")
similarity_corrected_file = os.path.join(
base_dir,
"results",
"saved_variables",
dataset_name +"_sample_lvl_sim_similarity_corrected_"+correction_method+".pickle")
ci_corrected_file = os.path.join(
base_dir,
"results",
"saved_variables",
dataset_name +"_sample_lvl_sim_ci_corrected_"+correction_method+".pickle")
permuted_score_file = os.path.join(
base_dir,
"results",
"saved_variables",
dataset_name +"_sample_lvl_sim_permuted.npy")
compendia_dir = os.path.join(
local_dir,
"experiment_simulated",
analysis_name)
# In[3]:
# Output files
svcca_file = os.path.join(
base_dir,
"results",
dataset_name +"_sample_lvl_sim_svcca_"+correction_method+".svg")
svcca_png_file = os.path.join(
base_dir,
"results",
dataset_name +"_sample_lvl_sim_svcca_"+correction_method+".png")
pca_uncorrected_file = os.path.join(
base_dir,
"results",
dataset_name +"_sample_lvl_sim_pca_uncorrected_"+correction_method+".png")
pca_corrected_file = os.path.join(
base_dir,
"results",
dataset_name +"_sample_lvl_sim_pca_corrected_"+correction_method+".png")
# In[4]:
# Load pickled files
uncorrected_svcca = pd.read_pickle(similarity_uncorrected_file)
err_uncorrected_svcca = pd.read_pickle(ci_uncorrected_file)
corrected_svcca = pd.read_pickle(similarity_corrected_file)
err_corrected_svcca = | pd.read_pickle(ci_corrected_file) | pandas.read_pickle |
#!/usr/bin/env python
# coding: utf-8
# ### Phase 3: Conduct EDA and classification model construction using the master_math.csv file that contains all relevant features and target variable
# #### Importing all necessary libraries
# In[151]:
import pandas
pandas.__version__
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score, cross_validate
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.gaussian_process.kernels import RBF
from sklearn.model_selection import learning_curve
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score, RepeatedStratifiedKFold, GridSearchCV, learning_curve, cross_val_predict
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer, make_column_transformer
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
from scipy import stats
import warnings
warnings.filterwarnings('ignore')
# In[152]:
get_ipython().run_line_magic('cd', '/Users/dansa/Documents/GitHub/Phase1/Data/MASTER')
# #### Loading the data and reformatting the school id column
# In[153]:
master_math_new = pandas.read_csv("master_math.csv")
master_math_new['NCESSCH'] = master_math_new['NCESSCH'].apply(lambda x: '{0:0>12}'.format(x))
master_math_new.head()
# In[154]:
master_math_new.shape
# #### Inspecting the data file
# In[155]:
master_math_new.columns
# Create a data frame with only the needed columns for further analysis
# In[156]:
math=pd.DataFrame(master_math_new, columns=[ 'NCESSCH', 'NAME', 'SCH_TYPE_x',
'TITLEI_STATUS','TEACHERS', 'FARMS_COUNT', 'Total_enroll_students',
'SCH_FTETEACH_TOT', 'SCH_FTETEACH_CERT', 'SCH_FTETEACH_NOTCERT',
'FTE_teachers_count', 'SalaryforTeachers', 'Total_SAT_ACT_students',
'SCH_IBENR_IND_new', 'Total_IB_students', 'SCH_APENR_IND_new',
'SCH_APCOURSES', 'SCH_APMATHENR_IND_new','Total_AP_math_students',
'Total_students_tookAP', 'SCH_MATHCLASSES_ALG', 'SCH_MATHCERT_ALG',
'Total_Alg1_enroll_students', 'Total_Alg1_pass_students',
'Income_Poverty_ratio','ALL_MTH00PCTPROF_1718_new'])
# In[157]:
math.head()
# ##### Rename columns
# In[158]:
math.rename(columns={'NCESSCH':'School_ID', 'SCH_TYPE_x':'School_type','FARMS_COUNT':'No.FARMS_students',
'SCH_FTETEACH_TOT':'FTE_teachcount','SCH_FTETEACH_CERT':'Certified_FTE_teachers','SCH_FTETEACH_NOTCERT':
'Noncertified_FTE_teachers','Total_SAT_ACT_students':'Students_participate_SAT_ACT','SCH_IBENR_IND_new':'IB_Indicator',
'SCH_APENR_IND_new':'AP_Indicator','SCH_APCOURSES':'No.ofAP_courses_offer','SCH_APMATHENR_IND_new':'Students_enroll_inAPMath?',
'SCH_MATHCLASSES_ALG':'No.ofAlg1classes','SCH_MATHCERT_ALG':'Alg1_taught_by_certmathteahcers',
'ALL_MTH00PCTPROF_1718_new':'Percent_Math_Proficient'}, inplace=True)
# In[159]:
math.describe().T
# ##### IB has some missing values, lets clean than up
# In[160]:
counts = math['IB_Indicator'].value_counts().to_dict()
print (counts)
# In[161]:
math=math[math['IB_Indicator']!=-6]
# In[162]:
counts = math['IB_Indicator'].value_counts().to_dict()
print (counts)
# ##### Let take a closer look at the dataframe and datatypes
# In[163]:
print(math.info())
# We have 13,799 entries and no null values in any column. There are 26 columns, but we can drop the school_id and name and we'll want to split off the Percent_Math_Proficient.
# The object type features should be strings.
#
# Let's take a quick look at some of the data.
# In[164]:
math.hist(bins=50, figsize=(20,15))
plt.show()
# We can see that some features have most of their instances at or near zero and relatively few instances at higher values, in some cases much higher. Other features cluster close to zero and have long tails. We also see the percent_math_proficient is almost normally distributed.
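# A quick way to put numbers on those long tails (added illustration, not part of
# the original notebook output): large positive skew values flag the right-skewed columns.
# In[ ]:
math.skew(numeric_only=True).sort_values(ascending=False).head(10)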
# In[165]:
math_EDA=math[['NAME','Income_Poverty_ratio','Percent_Math_Proficient']]
# In[166]:
highest_proficiency=math_EDA.sort_values(by=['Percent_Math_Proficient'], inplace=True, ascending=False)
# In[167]:
math_EDA.head()
# In[168]:
math_high = math_EDA.head(5)
math_high.shape
# In[169]:
plt.style.use('ggplot')
plt.barh(math_high.NAME, math_high.Percent_Math_Proficient, color='green')
plt.ylabel("School Names")
plt.xlabel("Percent Math Proficient")
plt.title("Top 5 high schools with highest percent math proficiency")
plt.xlim(0.0, 100.0)
plt.show()
# In[170]:
lowest_proficiency=math_EDA.sort_values(by=['Percent_Math_Proficient'], inplace=True, ascending=True)
# In[171]:
math_EDA.head()
# In[172]:
math_low = math_EDA.head(5)
math_low.shape
# In[173]:
plt.style.use('ggplot')
plt.barh(math_low.NAME, math_low.Percent_Math_Proficient, color='green')
plt.ylabel("School Names")
plt.xlabel("Percent Math Proficient")
plt.title("Top 5 high schools with lowest percent math proficiency")
plt.xlim(0.0, 100.0)
plt.show()
# In[174]:
sns.set_style('darkgrid')
_plt = sns.countplot(x='TITLEI_STATUS', data=math)
_plt.set_title('School Title I status')
_plt.set_xticklabels(['Title I schoolwide school','Not a Title I school','Title I schoolwide eligible- Title I targeted assistance program','Title I schoolwide eligible school-No program','Title I targeted assistance eligible school– No program','Title I targeted assistance school'])
_plt.set_xticklabels(_plt.get_xticklabels(), rotation=45, horizontalalignment='right')
plt.savefig('/Users/dansa/Documents/Title1_M_dist.png', dpi=300, bbox_inches='tight')
plt.show()
# Most of the high schools appear to be Title 1 schoolwide or not Title 1 funded schools
# ##### Let's look at the distribution of the proficiency percentages
# In[175]:
# ax = sns.distplot(math['Percent_Math_Proficient'], bins=20, kde=False, fit=stats.norm);
# # Get the fitted parameters used by sns
# (mu, sigma) = stats.norm.fit(math['Percent_Math_Proficient'])
# #print 'mu={0}, sigma={1}'.format(mu, sigma)
# # Legend and labels
# plt.legend(["normal dist. fit ($\mu=${0:.2g}, $\sigma=${1:.2f})".format(mu, sigma)])
# plt.ylabel('Frequency')
# # Cross-check this is indeed the case - should be overlaid over black curve
# x_dummy = np.linspace(stats.norm.ppf(0.01), stats.norm.ppf(0.99), 100)
# ax.plot(x_dummy, stats.norm.pdf(x_dummy, mu, sigma))
# plt.legend(["normal dist. fit ($\mu=${0:.2g}, $\sigma=${1:.2f})".format(mu, sigma),
# "cross-check"])
# In[176]:
from scipy.stats import norm
# Plot Histogram
sns.distplot(math['Percent_Math_Proficient'] , bins=20, kde=False, fit=stats.norm);
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(math['Percent_Math_Proficient'])
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('Proficiency distribution')
fig = plt.figure()
res = stats.probplot(math['Percent_Math_Proficient'], plot=plt)
plt.show()
print("Skewness: %f" % math['Percent_Math_Proficient'].skew())
print("Kurtosis: %f" % math['Percent_Math_Proficient'].kurt())
# #### Lets find the percent of certified and non-certified teachers
# In[177]:
math['Pct_certified_teachers']=(math['Certified_FTE_teachers']/math['FTE_teachcount']*100)
# In[178]:
math['Pct_noncertified_teachers']=(math['Noncertified_FTE_teachers']/math['FTE_teachcount']*100)
# #### Lets find the salary per FTE in each school
# In[179]:
math['Salary_perFTE_teacher'] = math['SalaryforTeachers']/math['FTE_teachers_count']
# In[180]:
math['IPR_estimate'] = math['Income_Poverty_ratio'] #Income poverty ratio is reported as a percent
# ##### Lets drop the unwanted columns
# In[181]:
math_clean=math.drop(['School_ID','NAME','Certified_FTE_teachers', 'Noncertified_FTE_teachers','FTE_teachcount','FTE_teachers_count','SalaryforTeachers','Income_Poverty_ratio' ], axis=1)
# In[182]:
math_clean.info()
# ##### Change school type from int to float
# In[183]:
math_clean['School_type'] = math_clean['School_type'].astype(float)
# In[184]:
math_clean.describe()
# ##### Check for missing or null values
# In[185]:
sns.heatmap(math_clean.isnull(),yticklabels=False,cbar=True,cmap='viridis')
# In[186]:
math_clean.shape
# #### Let's create Labels for Math Proficiency based on the percent distribution of the schools
# In[187]:
math_clean[['Percent_Math_Proficient']].describe()
# In[188]:
math_clean['Percent_Math_Proficient'].plot(kind='hist')
# In[189]:
boxplot = math_clean.boxplot(column=['Percent_Math_Proficient'])
boxplot.plot()
plt.show()
# In[190]:
mu = 200
sigma = 25
n_bins = 5
fig, ax = plt.subplots(figsize=(8, 4))
# plot the cumulative histogram
n, bins, patches = ax.hist(math_clean.Percent_Math_Proficient, n_bins, density=True, histtype='step',
cumulative=True, label='Empirical')
# Add a line showing the expected distribution.
y = ((1 / (np.sqrt(2 * np.pi) * sigma)) *
np.exp(-0.5 * (1 / sigma * (bins - mu))**2))
y = y.cumsum()
y /= y[-1]
ax.plot(bins, y, 'k--', linewidth=1.5, label='Theoretical')
# Overlay a reversed cumulative histogram.
ax.hist(math_clean.Percent_Math_Proficient, bins=bins, density=True, histtype='step', cumulative=-1,
label='Reversed emp.')
# tidy up the figure
ax.grid(True)
ax.legend(loc='right')
ax.set_title('Cumulative step histograms')
ax.set_xlabel('Percent Math Proficiency')
ax.set_ylabel('Likelihood of occurrence')
plt.show()
# In[191]:
# getting data of the histogram
count, bins_count = np.histogram(math_clean.Percent_Math_Proficient, bins=10)
# finding the Probability Distribution Function of the histogram using count values
pdf = count / sum(count)
# using numpy np.cumsum to calculate the Cumulative Distribution Function
# We can also find using the PDF values by looping and adding
cdf = np.cumsum(pdf)
# plotting PDF and CDF
plt.plot(bins_count[1:], pdf, color="red", label="PDF")
plt.plot(bins_count[1:], cdf, label="CDF")
plt.legend()
# In[192]:
fig, ax = plt.subplots()
math_clean['Percent_Math_Proficient'].hist(bins=30, color='#A9C5D3',
edgecolor='black', grid=False)
ax.set_title('Percent of Schools with at or above Math Proficiency Histogram', fontsize=12)
ax.set_xlabel('Percent of Math Proficiency', fontsize=12)
ax.set_ylabel('Frequency', fontsize=12)
#Reference: https://towardsdatascience.com/understanding-feature-engineering-part-1-continuous-numeric-data-da4e47099a7b
# In[193]:
quantile_list = [0, .25, .5, .75, 1.]
quantiles = math_clean['Percent_Math_Proficient'].quantile(quantile_list)
quantiles
# In[194]:
fig, ax = plt.subplots()
math_clean['Percent_Math_Proficient'].hist(bins=30, color='#A9C5D3',
edgecolor='black', grid=False)
for quantile in quantiles:
qvl = plt.axvline(quantile, color='r')
ax.legend([qvl], ['Quantiles'], fontsize=10)
ax.set_title('Percentages of Math Proficiency across all High Schools', fontsize=12)
ax.set_xlabel('Percent of Math Proficiency', fontsize=12)
ax.set_ylabel('Frequency', fontsize=12)
# In[195]:
pd.qcut(math_clean['Percent_Math_Proficient'], q=4, precision = 0).value_counts(ascending=True)
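# Tiny illustration of what pd.qcut does, using made-up numbers independent of the
# school data: equal-frequency (quartile) binning puts two of these eight values in each bin.
# In[ ]:
pd.qcut(pd.Series([10, 20, 30, 40, 50, 60, 70, 80]), q=4,
        labels=['Low', 'Moderate', 'High', 'Very High'])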
# In[196]:
quantile_labels = ['Low', 'Moderate','High','Very High']
quantile_numeric = [1,2,3,4]
math_clean['Percent_Math_Proficient_quantile_range'] = pd.qcut(
math_clean['Percent_Math_Proficient'],
q=quantile_list)
math_clean['Percent_Math_Proficient_quantile_label'] = pd.qcut(
math_clean['Percent_Math_Proficient'],
q=quantile_list,
labels=quantile_labels)
math_clean['Percent_Math_Proficient_quantile_encoded'] = pd.qcut(
math_clean['Percent_Math_Proficient'],
q=quantile_list,
labels=quantile_numeric,
precision=0)
math_clean.head()
# In[197]:
math_clean['Percent_Math_Proficient_quantile_label'].value_counts(ascending=True)
# In[198]:
math_clean['Percent_Math_Proficient_quantile_encoded'] = math_clean['Percent_Math_Proficient_quantile_encoded'].astype(float)
# ### Looking for Correlations and Visualizing
# We should calculate data correlations and plot a scatter matrix.
#
# For training the ML models, we'll want to separate the Percent_Math_Proficient from the rest of the data. But for investigating correlations, we'll want to include the target.
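# One way to get the scatter matrix mentioned above (added illustration; shown for
# a small subset of columns to keep the plot readable):
# In[ ]:
from pandas.plotting import scatter_matrix
scatter_matrix(math_clean[['IPR_estimate', 'Total_students_tookAP',
                           'Total_AP_math_students', 'Percent_Math_Proficient']],
               figsize=(10, 10), diagonal='kde')
plt.show()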
# In[199]:
math_clean1=math_clean[['School_type', 'TITLEI_STATUS', 'TEACHERS', 'No.FARMS_students',
'Total_enroll_students', 'Students_participate_SAT_ACT', 'IB_Indicator',
'Total_IB_students', 'AP_Indicator', 'No.ofAP_courses_offer',
'Students_enroll_inAPMath?', 'Total_AP_math_students','Total_students_tookAP',
'No.ofAlg1classes','Alg1_taught_by_certmathteahcers', 'Total_Alg1_enroll_students','Total_Alg1_pass_students',
'Pct_certified_teachers', 'Pct_noncertified_teachers',
'Salary_perFTE_teacher', 'IPR_estimate','Percent_Math_Proficient_quantile_encoded']]
# In[200]:
math_clean1.dtypes
# In[201]:
correlation_matrix = math_clean1.corr()
# In[202]:
correlation_matrix['Percent_Math_Proficient_quantile_encoded'].sort_values(ascending=False)
# It seems like a few features (IPR_estimate, Total_students_tookAP, Total_AP_math_students) have a weak to moderate positive correlation to the target (Percent_Math_Proficient), and a couple are somewhat negatively correlated (School_type).
#
# * IPR_estimate is the Neighborhood Income Poverty Ratio.
# * Total_students_tookAP is the count of students who took the AP exam.
# * Total_AP_math_students is the number of students who took an AP math course.
# * School_type refers to whether the school is a "1-Regular School, 2-Special Education School, 3-Career and Technical School and 4-Alternative Education School"
#
# We can look at a heatmap of the correlations of all numeric features to visualize which features are correlated.
# In[203]:
# correlation matrix heatmap
plt.figure(figsize=(28,15))
corr_heatmap = sns.heatmap(correlation_matrix, annot=True, linewidths=0.2, center=0, cmap="RdYlGn")
corr_heatmap.set_title('Correlation Heatmap')
plt.savefig('/Users/dansa/Documents/corr_heatmap.png', dpi=300, bbox_inches='tight')
# In[204]:
#test
corr_pairs = {}
feats = correlation_matrix.columns
for x in feats:
for y in feats:
if x != y and np.abs(correlation_matrix[x][y]) >= 0.7: # which pairs are strongely correlated?
if (y, x) not in corr_pairs.keys():
corr_pairs[(x, y)] = correlation_matrix[x][y]
# In[205]:
corr_pairs
# In[206]:
attrs = ['IPR_estimate','Total_AP_math_students','Total_students_tookAP','Percent_Math_Proficient_quantile_encoded']
# In[207]:
sns.set(style='ticks', color_codes=True)
_ = sns.pairplot(data=correlation_matrix[attrs], height=3, aspect=1, kind='scatter', plot_kws={'alpha':0.9})
# In[208]:
sns.jointplot(x="IPR_estimate", y="Percent_Math_Proficient_quantile_encoded", data=math_clean1)
# In[209]:
sns.pairplot(math_clean1, hue = 'Percent_Math_Proficient_quantile_encoded',vars = ['IPR_estimate','Total_students_tookAP','Total_AP_math_students','No.ofAP_courses_offer'] )
# ### ML prep
# #### Separate labels
# Let's separate out the target from the predicting features.
# In[210]:
X = math_clean1.drop('Percent_Math_Proficient_quantile_encoded', axis=1)
y = math_clean1.Percent_Math_Proficient_quantile_encoded
# In[211]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# In[212]:
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
# #### Transform Categorical Features
# Since these categorical features don't appear to have an inherent ordering, let's try encoding them as one-hot vectors for better ML performance.
# In[213]:
train_data_onehot = pd.get_dummies(X_train, columns=['TITLEI_STATUS'], prefix=['TITLEI_STATUS'])
train_data_onehot.head()
test_data_onehot = pd.get_dummies(X_test, columns=['TITLEI_STATUS'], prefix=['TITLEI_STATUS'])
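# Tiny illustration of what get_dummies does (added example with made-up rows,
# independent of the training data):
# In[ ]:
pd.get_dummies(pd.DataFrame({'TITLEI_STATUS': ['Title I schoolwide school',
                                               'Not a Title I school']}),
               columns=['TITLEI_STATUS'], prefix=['TITLEI_STATUS'])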
# #### Scale Features
# We can check out the statistics for each feature, do they need to be normalized?
# In[214]:
train_data_onehot.describe()
# In[215]:
sc= StandardScaler()
X_train_scaled = sc.fit_transform(train_data_onehot)
X_test_scaled = sc.transform(test_data_onehot)
# In[216]:
print(X_train_scaled)
print(X_train_scaled.mean(axis=0))
# In[217]:
print(X_train_scaled.std(axis=0))
# In[218]:
X_train_std = pd.DataFrame(X_train_scaled, columns=train_data_onehot.columns)
X_test_std = pd.DataFrame(X_test_scaled, columns=test_data_onehot.columns)
# That should work better, the standard deviation for each feature is 1 and the mean is ~0.
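# Optional sanity check (added illustration): after StandardScaler the per-feature
# means should be ~0 and the standard deviations ~1.
# In[ ]:
X_train_std.describe().loc[['mean', 'std']].round(2)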
# ### Classification models
# #### Logistic Regression
# In[219]:
lr=LogisticRegression()
# In[220]:
lr.fit(X_train_std,y_train)
lr_pred=lr.predict(X_test_std)
# In[221]:
print("Predicted Levels: ",list(lr_pred[:10]))
print("Actual Levels: ",list(y_test[:10]))
# In[222]:
intercept = lr.intercept_
# In[223]:
coefficients = lr.coef_
# In[224]:
coef_list = list(coefficients[0,:])
# In[225]:
coef_df = pd.DataFrame({'Feature': list(X_train_std.columns),
'Coefficient': coef_list})
print(coef_df)
# In[226]:
predicted_prob = lr.predict_proba(X_test_std)[:,1]
# In[227]:
cm = pd.DataFrame(confusion_matrix(y_test, lr_pred))
cm['Total'] = np.sum(cm, axis=1)
cm = cm.append(np.sum(cm, axis=0), ignore_index=True)
cm.columns = ['1','2','3','4', 'Total']
cm = cm.set_index([['1','2','3','4', 'Total']])
print(cm)
# In[228]:
print(classification_report(y_test, lr_pred))
# #### SVC
# In[229]:
from sklearn import svm
svc = svm.SVC(kernel='linear')
# In[230]:
svc.fit(X_train_std,y_train)
svc_pred=svc.predict(X_test_std)
# In[231]:
print("Predicted Levels: ",list(svc_pred[:10]))
print("Actual Levels: ",list(y_test[:10]))
# In[232]:
def f_importances(coef, names, top=-1):
imp = coef
imp, names = zip(*sorted(list(zip(imp, names))))
# Show all features
if top == -1:
top = len(names)
plt.barh(range(top), imp[::-1][0:top], align='center')
plt.yticks(range(top), names[::-1][0:top])
plt.show()
f_importances(abs(svc.coef_[0]), X_train_std.columns, top=10)
# In[233]:
print(classification_report(y_test, svc_pred))
# #### Knn Classifier
# In[234]:
knn=KNeighborsClassifier()
# In[235]:
knn.fit(X_train_std,y_train)
knn_pred=knn.predict(X_test_std)
# In[236]:
print("Predicted Levels: ",list(knn_pred[:10]))
print("Actual Levels: ",list(y_test[:10]))
# In[237]:
print(classification_report(y_test, knn_pred))
# #### Decision Trees
# In[238]:
dt=DecisionTreeClassifier()
# In[239]:
dt.fit(X_train_std,y_train)
dt_pred=dt.predict(X_test_std)
# In[240]:
print("Predicted Levels: ",list(dt_pred[:10]))
print("Actual Levels: ",list(y_test[:10]))
# In[241]:
dt_feature_imp = pd.Series(dt.feature_importances_,index=X_train_std.columns).sort_values(ascending=False)
dt_feature_imp
# In[242]:
sns.barplot(x=dt_feature_imp, y=dt_feature_imp.index)
# Add labels to your graph
plt.xlabel('Feature Importance Score')
plt.ylabel('Features')
plt.title("Visualizing Important Features")
#plt.legend()
plt.show()
# In[243]:
print(classification_report(y_test, dt_pred))
# #### Gaussian RandomForestClassifier
# In[244]:
rf=RandomForestClassifier(n_estimators=2000, max_depth=2)
# In[245]:
rf.fit(X_train_std,y_train)
rf_pred=rf.predict(X_test_std)
# In[246]:
print("Predicted Levels: ",list(rf_pred[:10]))
print("Actual Levels: ",list(y_test[:10]))
# In[247]:
rf_feature_imp = pd.Series(rf.feature_importances_,index=X_train_std.columns).sort_values(ascending=False)
rf_feature_imp
# In[248]:
#Feature importance for Random Forest Model
# Creating a bar plot
sns.barplot(x=rf_feature_imp, y=rf_feature_imp.index)
# Add labels to your graph
plt.xlabel('Feature Importance Score')
plt.ylabel('Features')
plt.title("Visualizing Important Features")
#plt.legend()
plt.show()
# In[249]:
print(classification_report(y_test, rf_pred, labels=[1, 2, 3, 4]))
# #### GradientBoostingClassifier
# In[250]:
#Fitting model
gb = GradientBoostingClassifier()  # for parameters, try (n_estimators=2000, max_depth=2)
gb.fit(X_train_std,y_train)
# In[251]:
gb_pred=gb.predict(X_test_std)
# In[252]:
print("Predicted: ",list(gb_pred[:10]))
print("Actual: ",list(y_test[:10]))
# In[253]:
gb_feature_imp = pd.Series(gb.feature_importances_,index=X_train_std.columns).sort_values(ascending=False)
gb_feature_imp
# In[254]:
#Feature importance for Gradient Boosting classfier Model
# Creating a bar plot
sns.barplot(x=gb_feature_imp, y=gb_feature_imp.index)
# Add labels to your graph
plt.xlabel('Feature Importance Score')
plt.ylabel('Features')
plt.title("Visualizing Important Features for predicting math proficiency labels")
#plt.legend()
plt.show()
# In[255]:
print(classification_report(y_test, gb_pred, labels=[1, 2, 3, 4]))
# #### ExtraTreesClassifier
# In[256]:
#Fitting model
et = ExtraTreesClassifier(n_estimators=2000, max_depth=2)  # for parameters (n_estimators=2000, max_depth=2)
et.fit(X_train_std,y_train)
# In[257]:
#making predictions
et_pred=et.predict(X_test_std)
# In[258]:
print("Predicted Levels: ",list(et_pred[:10]))
print("Actual Levels: ",list(y_test[:10]))
# In[259]:
et_feature_imp = pd.Series(et.feature_importances_,index=X_train_std.columns).sort_values(ascending=False)
et_feature_imp
# In[260]:
sns.barplot(x=et_feature_imp, y=et_feature_imp.index)
# Add labels to your graph
plt.xlabel('Feature Importance Score')
plt.ylabel('Features')
plt.title("Visualizing Important Features")
#plt.legend()
plt.show()
# In[261]:
print(classification_report(y_test, et_pred, labels=[1, 2, 3, 4]))
# #### Key metric summary from the various models and cross validation accuracy
# In[262]:
#Computing accuracy
print("Accuracy of Logistic Regression model: %.2f" % accuracy_score(y_test,lr_pred))
print("Accuracy of SVC model: %.2f" % accuracy_score(y_test,svc_pred))
print("Accuracy of KNN model: %.2f" % accuracy_score(y_test,knn_pred))
print("Accuracy of Decision Trees model: %.2f" % accuracy_score(y_test,dt_pred))
print("Accuracy of Random forest classfier model: %.2f" % accuracy_score(y_test,rf_pred))
print("Accuracy of Gradient Boosting classfier model: %.2f" % accuracy_score(y_test,gb_pred))
print("Accuracy of Extra trees classfier model: %.2f" % accuracy_score(y_test,et_pred))
# In[263]:
#Computing precision
print("Logistic Regression model: %.2f" % precision_score(y_test,lr_pred,average='weighted'))
print("SVC model: %.2f" % precision_score(y_test,svc_pred,average='weighted'))
print("KNN model: %.2f" % precision_score(y_test,knn_pred, average='weighted'))
print("Decision Trees: %.2f" % precision_score(y_test,dt_pred, average='weighted'))
print("Random forest classfier model: %.2f" % precision_score(y_test,rf_pred, average='weighted'))
print("Gradient Boosting classfier model: %.2f" % precision_score(y_test,gb_pred, average='weighted'))
print("Extra trees classfier model: %.2f" % precision_score(y_test,et_pred, average='weighted'))
# In[264]:
#Computing recall
print("Logistic Regression model: %.2f" % recall_score(y_test,lr_pred,average='weighted'))
print("SVC model: %.2f" % recall_score(y_test,svc_pred,average='weighted'))
print("KNN model: %.2f" % recall_score(y_test,knn_pred,average='weighted'))
print("Decision Trees: %.2f" % recall_score(y_test,dt_pred,average='weighted'))
print("Random forest classfier model: %.2f" % recall_score(y_test,rf_pred,average='weighted'))
print("Gradient Boosting classfier model: %.2f" % recall_score(y_test,gb_pred,average='weighted'))
print("Extra trees classfier model: %.2f" % recall_score(y_test,et_pred,average='weighted'))
# In[265]:
#Computing f1 score
print("Logistic Regression model: %.2f" % f1_score(y_test,lr_pred,average='weighted'))
print("SVC model: %.2f" % f1_score(y_test,svc_pred,average='weighted'))
print("KNN model: %.2f" % f1_score(y_test,knn_pred,average='weighted'))
print("Decision Trees: %.2f" % f1_score(y_test,dt_pred,average='weighted'))
print("Random forest classfier model: %.2f" % f1_score(y_test,rf_pred,average='weighted'))
print("Gradient Boosting classfier model: %.2f" % f1_score(y_test,gb_pred,average='weighted'))
print("Extra trees classfier model: %.2f" % f1_score(y_test,et_pred,average='weighted'))
# In[271]:
Results = {'Model Name': ['Logistic Regression', 'SVC','KNN','Decision Trees','Random Forest','Gradient boosting','Extra Trees'],
'Accuracy': ['0.44','0.42','0.42','0.38','0.42','0.49','0.38'],
'Precision': ['0.42','0.43','0.42','0.39','0.45','0.48','0.30'],
'Recall': ['0.44','0.42','0.42','0.38','0.42','0.49','0.38'],
'F1score': ['0.42','0.38','0.42','0.38','0.38','0.47','0.33']
}
Summary = pd.DataFrame(Results, columns=['Model Name', 'Accuracy', 'Precision', 'Recall', 'F1score'])
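# NOTE (added): the section heading above also promises cross-validation accuracy, but no
# such cell survives in this export. The sketch below is an assumption, not the author's
# original code: it re-scores the gradient boosting model (the best performer in the
# summary table) with 5-fold cross-validation on the standardized training data.
from sklearn.model_selection import cross_val_score  # assumed not imported earlier

cv_scores = cross_val_score(GradientBoostingClassifier(), X_train_std, y_train, cv=5)
print("Cross-validation accuracy per fold: ", [round(float(s), 2) for s in cv_scores])
print("Mean cross-validation accuracy: %.2f" % cv_scores.mean())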
import gc
import numpy as np
from pandas import (
    DatetimeIndex,
    Float64Index,
    Index,
    IntervalIndex,
    MultiIndex,
    RangeIndex,
    Series,
    date_range,
)

from .pandas_vb_common import tm


class SetOperations:
    params = (
        ["datetime", "date_string", "int", "strings"],
        ["intersection", "union", "symmetric_difference"],
    )
    param_names = ["dtype", "method"]

    def setup(self, dtype, method):
        N = 10 ** 5
        dates_left = date_range("1/1/2000", periods=N, freq="T")
        fmt = "%Y-%m-%d %H:%M:%S"
        date_str_left = Index(dates_left.strftime(fmt))
        int_left = Index(np.arange(N))
        str_left = tm.makeStringIndex(N)
        data = {
            "datetime": {"left": dates_left, "right": dates_left[:-1]},
            "date_string": {"left": date_str_left, "right": date_str_left[:-1]},
            "int": {"left": int_left, "right": int_left[:-1]},
            "strings": {"left": str_left, "right": str_left[:-1]},
        }
        self.left = data[dtype]["left"]
        self.right = data[dtype]["right"]

    def time_operation(self, dtype, method):
        getattr(self.left, method)(self.right)


class SetDisjoint:
    def setup(self):
        N = 10 ** 5
        B = N + 20000
        self.datetime_left = DatetimeIndex(range(N))
        self.datetime_right = DatetimeIndex(range(N, B))

    def time_datetime_difference_disjoint(self):
        self.datetime_left.difference(self.datetime_right)


class Range:
    def setup(self):
        self.idx_inc = RangeIndex(start=0, stop=10 ** 6, step=3)
        self.idx_dec = RangeIndex(start=10 ** 6, stop=-1, step=-3)

    def time_max(self):
        self.idx_inc.max()

    def time_max_trivial(self):
        self.idx_dec.max()

    def time_min(self):
        self.idx_dec.min()

    def time_min_trivial(self):
        self.idx_inc.min()

    def time_get_loc_inc(self):
        self.idx_inc.get_loc(900_000)

    def time_get_loc_dec(self):
        self.idx_dec.get_loc(100_000)

    def time_iter_inc(self):
        for _ in self.idx_inc:
            pass

    def time_iter_dec(self):
        for _ in self.idx_dec:
            pass


class IndexEquals:
    def setup(self):
        idx_large_fast = RangeIndex(100_000)
        idx_small_slow = date_range(start="1/1/2012", periods=1)
        self.mi_large_slow = MultiIndex.from_product([idx_large_fast, idx_small_slow])
        self.idx_non_object = RangeIndex(1)

    def time_non_object_equals_multiindex(self):
        self.idx_non_object.equals(self.mi_large_slow)


class IndexAppend:
    def setup(self):
        N = 10_000
        self.range_idx = RangeIndex(0, 100)
        self.int_idx = self.range_idx.astype(int)
        self.obj_idx = self.int_idx.astype(str)
        self.range_idxs = []
        self.int_idxs = []
        self.object_idxs = []
        for i in range(1, N):
            r_idx = RangeIndex(i * 100, (i + 1) * 100)
            self.range_idxs.append(r_idx)
            i_idx = r_idx.astype(int)
            self.int_idxs.append(i_idx)
            o_idx = i_idx.astype(str)
            self.object_idxs.append(o_idx)

    def time_append_range_list(self):
        self.range_idx.append(self.range_idxs)

    def time_append_int_list(self):
        self.int_idx.append(self.int_idxs)

    def time_append_obj_list(self):
        self.obj_idx.append(self.object_idxs)


class Indexing:
    params = ["String", "Float", "Int"]
    param_names = ["dtype"]

    def setup(self, dtype):
        N = 10 ** 6
        self.idx = getattr(tm, f"make{dtype}Index")(N)
        self.array_mask = (np.arange(N) % 3) == 0
        self.series_mask = Series(self.array_mask)
        self.sorted = self.idx.sort_values()
        half = N // 2
        self.non_unique = self.idx[:half].append(self.idx[:half])
        self.non_unique_sorted = (
            self.sorted[:half].append(self.sorted[:half]).sort_values()
        )
        self.key = self.sorted[N // 4]

    def time_boolean_array(self, dtype):
        self.idx[self.array_mask]

    def time_boolean_series(self, dtype):
        self.idx[self.series_mask]

    def time_get(self, dtype):
        self.idx[1]

    def time_slice(self, dtype):
        self.idx[:-1]

    def time_slice_step(self, dtype):
        self.idx[::2]

    def time_get_loc(self, dtype):
        self.idx.get_loc(self.key)

    def time_get_loc_sorted(self, dtype):
        self.sorted.get_loc(self.key)

    def time_get_loc_non_unique(self, dtype):
        self.non_unique.get_loc(self.key)

    def time_get_loc_non_unique_sorted(self, dtype):
        self.non_unique_sorted.get_loc(self.key)


class Float64IndexMethod:
    # GH 13166
    def setup(self):
        N = 100_000
        a = np.arange(N)
        self.ind = Float64Index(a * 4.8000000418824129e-08)

    def time_get_loc(self):
        self.ind.get_loc(0)


class IntervalIndexMethod:
    # GH 24813
    params = [10 ** 3, 10 ** 5]

    def setup(self, N):
        left = np.append(np.arange(N), np.array(0))
        right = np.append(np.arange(1, N + 1), np.array(1))
        self.intv = IntervalIndex.from_arrays(left, right)
        self.intv._engine
        self.intv2 = IntervalIndex.from_arrays(left + 1, right + 1)
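    # NOTE (added): this copy of the benchmark class ends after setup(), so asv would have
    # nothing to time. The methods below are a minimal sketch, assuming the intended
    # benchmarks exercise the interval indexes built above; they are not necessarily the
    # upstream pandas benchmark methods.
    def time_monotonic_inc(self, N):
        self.intv.is_monotonic_increasing

    def time_is_unique(self, N):
        self.intv.is_unique

    def time_intersection_both_duplicate(self, N):
        self.intv.intersection(self.intv2)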