import math
import string
from typing import Optional, Sequence, Tuple
import hypothesis.strategies as st
import numpy as np
import pandas as pd
import pandas.testing as tm
import pyarrow as pa
import pytest
from hypothesis import example, given, settings
import fletcher as fr
from fletcher.testing import examples
try:
# Only available in pandas 1.2+
# When this class is defined, we can also use `.str` on fletcher columns.
from pandas.core.strings.object_array import ObjectStringArrayMixin # noqa F401
_str_accessors = ["str", "fr_str"]
except ImportError:
_str_accessors = ["fr_str"]
@pytest.fixture(params=_str_accessors, scope="module")
def str_accessor(request):
return request.param
@st.composite
def string_patterns_st(draw, max_len=50) -> Tuple[Sequence[Optional[str]], str, int]:
ab_charset_st = st.sampled_from("ab")
ascii_charset_st = st.sampled_from(string.ascii_letters)
charset_st = st.sampled_from((ab_charset_st, ascii_charset_st))
charset = draw(charset_st)
fixed_pattern_st = st.sampled_from(["a", "aab", "aabaa"])
generated_pattern_st = st.text(alphabet=charset, max_size=max_len)
pattern_st = st.one_of(fixed_pattern_st, generated_pattern_st)
pattern = draw(pattern_st)
min_str_size = 0 if len(pattern) > 0 else 1
raw_str_st = st.one_of(
st.none(), st.lists(charset, min_size=min_str_size, max_size=max_len)
)
raw_seq_st = st.lists(raw_str_st, max_size=max_len)
raw_seq = draw(raw_seq_st)
for s in raw_seq:
if s is None:
continue
"""
There seems to be a bug in pandas for this edge case
>>> pd.Series(['']).str.replace('', 'abc', n=1)
0
dtype: object
But
>>> pd.Series(['']).str.replace('', 'abc')
0 abc
dtype: object
I believe the second result is the correct one and this is what the
fletcher implementation returns.
"""
max_ind = len(s) - len(pattern)
if max_ind < 0:
continue
repl_ind_st = st.integers(min_value=0, max_value=max_ind)
repl_ind_list_st = st.lists(repl_ind_st, max_size=math.ceil(max_len / 10))
repl_ind_list = draw(repl_ind_list_st)
for j in repl_ind_list:
s[j : j + len(pattern)] = pattern
seq = ["".join(s) if s is not None else None for s in raw_seq]
offset = draw(st.integers(min_value=0, max_value=len(seq)))
return (seq, pattern, offset)
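# Illustrative note (values hypothetical): a single draw from the strategy
# above yields a tuple such as (["aab", None, "babaa"], "aab", 1), i.e. a
# sequence of optional strings seeded with the pattern, the pattern itself,
# and an offset used to exercise offset/slicing handling in the tests below.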
string_patterns = pytest.mark.parametrize(
"data, pat",
[
([], ""),
(["a", "b"], ""),
(["aa", "ab", "ba"], "a"),
(["aa", "ab", "ba", "bb", None], "a"),
(["aa", "ab", "ba", "bb", None], "A"),
(["aa", "ab", "bA", "bB", None], "a"),
(["aa", "AB", "ba", "BB", None], "A"),
],
)
def _fr_series_from_data(data, fletcher_variant, dtype=pa.string()):
arrow_data = pa.array(data, type=dtype)
if fletcher_variant == "chunked":
fr_array = fr.FletcherChunkedArray(arrow_data)
else:
fr_array = fr.FletcherContinuousArray(arrow_data)
return pd.Series(fr_array)
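# Illustrative sketch, not used by the tests: how the helper above wraps plain
# Python data in a fletcher-backed pandas Series.
def _example_fr_series_from_data():
    ser = _fr_series_from_data(["a", None], "chunked")
    assert isinstance(ser.values, fr.FletcherChunkedArray)
    ser = _fr_series_from_data(["a", None], "continuous")
    assert isinstance(ser.values, fr.FletcherContinuousArray)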
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_text_cat(data, str_accessor, fletcher_variant, fletcher_variant_2):
if any("\x00" in x for x in data if x):
# pytest.skip("pandas cannot handle \\x00 characters in tests")
# Skip is not working properly with hypothesis
return
ser_pd = pd.Series(data, dtype=str)
ser_fr = _fr_series_from_data(data, fletcher_variant)
ser_fr_other = _fr_series_from_data(data, fletcher_variant_2)
result_pd = ser_pd.str.cat(ser_pd)
result_fr = getattr(ser_fr, str_accessor).cat(ser_fr_other)
result_fr = result_fr.astype(object)
# Pandas returns np.nan for NA values in cat, keep this in line
result_fr[result_fr.isna()] = np.nan
tm.assert_series_equal(result_fr, result_pd)
def _check_series_equal(result_fr, result_pd):
result_fr = result_fr.astype(result_pd.dtype)
tm.assert_series_equal(result_fr, result_pd)
def _check_str_to_t(
t, func, data, str_accessor, fletcher_variant, test_offset=0, *args, **kwargs
):
"""Check a .str. function that returns a series with type t."""
tail_len = len(data) - test_offset
ser_pd = pd.Series(data, dtype=str).tail(tail_len)
result_pd = getattr(ser_pd.str, func)(*args, **kwargs)
ser_fr = _fr_series_from_data(data, fletcher_variant).tail(tail_len)
result_fr = getattr(getattr(ser_fr, str_accessor), func)(*args, **kwargs)
_check_series_equal(result_fr, result_pd)
def _check_str_to_str(func, data, str_accessor, fletcher_variant, *args, **kwargs):
_check_str_to_t(str, func, data, str_accessor, fletcher_variant, *args, **kwargs)
def _check_str_to_bool(func, data, str_accessor, fletcher_variant, *args, **kwargs):
_check_str_to_t(bool, func, data, str_accessor, fletcher_variant, *args, **kwargs)
@string_patterns
def test_text_endswith(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool("endswith", data, str_accessor, fletcher_variant, pat=pat)
@string_patterns
def test_text_startswith(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool("startswith", data, str_accessor, fletcher_variant, pat=pat)
@string_patterns
def test_contains_no_regex(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains", data, str_accessor, fletcher_variant, pat=pat, regex=False
)
@pytest.mark.parametrize(
"data, pat, expected",
[
([], "", []),
(["a", "b"], "", [True, True]),
(["aa", "Ab", "ba", "bb", None], "a", [True, False, True, False, None]),
],
)
def test_contains_no_regex_ascii(data, pat, expected, str_accessor, fletcher_variant):
if str_accessor == "str":
pytest.skip(
"return types not stable yet, might sometimes return null instead of bool"
)
return
fr_series = _fr_series_from_data(data, fletcher_variant)
fr_expected = _fr_series_from_data(expected, fletcher_variant, pa.bool_())
# Run over slices to check offset handling code
for i in range(len(data)):
ser = fr_series.tail(len(data) - i)
expected = fr_expected.tail(len(data) - i)
result = getattr(ser, str_accessor).contains(pat, regex=False)
tm.assert_series_equal(result, expected)
@settings(deadline=None)
@given(data_tuple=string_patterns_st())
def test_contains_no_regex_case_sensitive(data_tuple, str_accessor, fletcher_variant):
data, pat, test_offset = data_tuple
_check_str_to_bool(
"contains",
data,
str_accessor,
fletcher_variant,
test_offset=test_offset,
pat=pat,
case=True,
regex=False,
)
@string_patterns
def test_contains_no_regex_ignore_case(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains",
data,
str_accessor,
fletcher_variant,
pat=pat,
regex=False,
case=False,
)
regex_patterns = pytest.mark.parametrize(
"data, pat",
[
([], ""),
(["a", "b"], ""),
(["aa", "ab", "ba"], "a"),
(["aa", "ab", "ba", None], "a"),
(["aa", "ab", "ba", None], "a$"),
(["aa", "ab", "ba", None], "^a"),
(["Aa", "ab", "ba", None], "A"),
(["aa", "AB", "ba", None], "A$"),
(["aa", "AB", "ba", None], "^A"),
],
)
@regex_patterns
def test_contains_regex(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains", data, str_accessor, fletcher_variant, pat=pat, regex=True
)
@regex_patterns
def test_contains_regex_ignore_case(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains",
data,
str_accessor,
fletcher_variant,
pat=pat,
regex=True,
case=False,
)
@settings(deadline=None)
@given(
data_tuple=string_patterns_st(),
n=st.integers(min_value=0, max_value=10),
repl=st.sampled_from(["len4", "", "z"]),
)
@example(
data_tuple=(["aababaa"], "aabaa", 0),
repl="len4",
n=1,
fletcher_variant="continuous",
)
@example(data_tuple=(["aaa"], "a", 0), repl="len4", n=1, fletcher_variant="continuous")
def test_replace_no_regex_case_sensitive(
data_tuple, repl, n, str_accessor, fletcher_variant
):
data, pat, test_offset = data_tuple
_check_str_to_str(
"replace",
data,
str_accessor,
fletcher_variant,
test_offset=test_offset,
pat=pat,
repl=repl,
n=n,
case=True,
regex=False,
)
@settings(deadline=None)
@given(data_tuple=string_patterns_st())
@example(data_tuple=(["a"], "", 0), fletcher_variant="chunked")
def test_count_no_regex(data_tuple, str_accessor, fletcher_variant):
"""Check a .str. function that returns a series with type t."""
data, pat, test_offset = data_tuple
tail_len = len(data) - test_offset
ser_pd = pd.Series(data, dtype=str).tail(tail_len)
result_pd = getattr(ser_pd.str, "count")(pat=pat)
ser_fr = _fr_series_from_data(data, fletcher_variant).tail(tail_len)
kwargs = {}
if str_accessor.startswith("fr_"):
kwargs["regex"] = False
result_fr = getattr(ser_fr, str_accessor).count(pat=pat, **kwargs)
_check_series_equal(result_fr, result_pd)
def _optional_len(x: Optional[str]) -> int:
if x is not None:
return len(x)
else:
return 0
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_text_zfill(data, str_accessor, fletcher_variant):
if any("\x00" in x for x in data if x):
# pytest.skip("pandas cannot handle \\x00 characters in tests")
# Skip is not working properly with hypothesis
return
ser_pd = pd.Series(data, dtype=str)
max_str_len = ser_pd.map(_optional_len).max()
if pd.isna(max_str_len):
max_str_len = 0
arrow_data = pa.array(data, type=pa.string())
if fletcher_variant == "chunked":
fr_array = fr.FletcherChunkedArray(arrow_data)
else:
fr_array = fr.FletcherContinuousArray(arrow_data)
ser_fr = pd.Series(fr_array)
result_pd = ser_pd.str.zfill(max_str_len + 1)
result_fr = getattr(ser_fr, str_accessor).zfill(max_str_len + 1)
result_fr = result_fr.astype(object)
# Pandas returns np.nan for NA values in zfill, keep this in line
result_fr[result_fr.isna()] = np.nan
tm.assert_series_equal(result_fr, result_pd)
@settings(deadline=None, max_examples=3)
@given(data=st.lists(st.one_of(st.text(), st.none())))
@examples(
example_list=[
[
" 000000000000000000000000000000000000000000İࠀࠀࠀࠀ𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐤱000000000000𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀"
],
["\x80 "],
[],
],
example_kword="data",
)
def test_text_strip_offset(str_accessor, fletcher_variant, fletcher_slice_offset, data):
_do_test_text_strip(str_accessor, fletcher_variant, fletcher_slice_offset, data)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
@examples(
example_list=[
[],
[""],
[None],
[" "],
["\u2000"],
[" a"],
["a "],
[" a "],
# https://github.com/xhochy/fletcher/issues/174
["\xa0"],
["\u2000a\u2000"],
["\u2000\u200C\u2000"],
["\n\u200C\r"],
["\u2000\x80\u2000"],
["\t\x80\x0b"],
["\u2000\u10FFFF\u2000"],
[" \u10FFFF "],
]
+ [
[c]
for c in " \t\r\n\x1f\x1e\x1d\x1c\x0c\x0b"
"\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2000\u2009\u200A\u200B\u2028\u2029\u202F\u205F"
]
+ [[chr(c)] for c in range(0x32)]
+ [[chr(c)] for c in range(0x80, 0x85)]
+ [[chr(c)] for c in range(0x200C, 0x2030)]
+ [[chr(c)] for c in range(0x2060, 0x2070)]
+ [[chr(c)] for c in range(0x10FFFE, 0x110000)],
example_kword="data",
)
def test_text_strip(str_accessor, fletcher_variant, data):
_do_test_text_strip(str_accessor, fletcher_variant, 1, data)
def _do_test_text_strip(str_accessor, fletcher_variant, fletcher_slice_offset, data):
if any("\x00" in x for x in data if x):
# pytest.skip("pandas cannot handle \\x00 characters in tests")
# Skip is not working properly with hypothesis
return
ser_pd = pd.Series(data, dtype=str)
arrow_data = pa.array(
[None for _ in range(fletcher_slice_offset)] + data, type=pa.string()
)
if fletcher_variant == "chunked":
fr_array = fr.FletcherChunkedArray(arrow_data)
else:
fr_array = fr.FletcherContinuousArray(arrow_data)
ser_fr = pd.Series(fr_array[fletcher_slice_offset:])
result_pd = ser_pd.str.strip()
result_fr = getattr(ser_fr, str_accessor).strip()
result_fr = result_fr.astype(object)
# Pandas returns np.nan for NA values in strip, keep this in line
result_fr[result_fr.isna()] = np.nan
result_pd[result_pd.isna()] = np.nan
tm.assert_series_equal(result_fr, result_pd)
def test_fr_str_accessor(fletcher_array):
data = ["a", "b"]
ser_pd = pd.Series(data)
# object series is returned
s = ser_pd.fr_str.encode("utf8")
assert s.dtype == np.dtype("O")
# test fletcher functionality and fallback to pandas
arrow_data = pa.array(data, type=pa.string())
fr_array = fletcher_array(arrow_data)
ser_fr = pd.Series(fr_array)
# pandas strings only method
s = ser_fr.fr_str.encode("utf8")
assert isinstance(s.values, fr.FletcherBaseArray)
def test_fr_str_accessor_fail(fletcher_variant):
data = [1, 2]
ser_pd = pd.Series(data)
with pytest.raises(Exception):
ser_pd.fr_str.startswith("a")
@pytest.mark.parametrize("regex", ["([0-9]+)", "([0-9]+)\\+([a-z]+)*"])
@pytest.mark.parametrize(
"data", [["123+"], ["123+a"], ["123+a", "123+"], ["123+", "123+a"]]
)
def test_text_extractall(str_accessor, fletcher_variant, data, regex):
if str_accessor == "str":
pytest.skip("extractall is not yet dispatched to the ExtensionArray")
return
ser_fr = _fr_series_from_data(data, fletcher_variant)
result_fr = getattr(ser_fr, str_accessor).extractall(regex)
assert isinstance(result_fr[0].dtype, fr.FletcherBaseDtype)
ser_pd = pd.Series(data)
result_pd = ser_pd.str.extractall(regex)
tm.assert_frame_equal(result_pd, result_fr.astype(object))
@pytest.mark.parametrize("data", [["123"], ["123+"], ["123+a+", "123+"]])
@pytest.mark.parametrize("expand", [True, False])
def test_text_split(str_accessor, fletcher_variant, data, expand):
ser_fr = _fr_series_from_data(data, fletcher_variant)
result_fr = getattr(ser_fr, str_accessor).split("+", expand=expand)
ser_pd = pd.Series(data)
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import numpy as np
import pandas as pd
import platform
import unittest
from itertools import combinations, combinations_with_replacement, product
from numba.core.config import IS_32BITS
from numba.core.errors import TypingError
from sdc.tests.test_base import TestCase
from sdc.tests.test_utils import (skip_numba_jit,
_make_func_from_text,
gen_frand_array)
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
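# Illustrative sketch, not part of the test suite. It assumes that
# _make_func_from_text simply exec-compiles the generated source and returns
# the resulting `test_impl` function object.
def _example_make_func_use_binop1():
    impl = _make_func_use_binop1('+')
    # impl behaves like: def test_impl(A, B): return A + B
    result = impl(pd.Series([1, 2]), pd.Series([3, 4]))
    pd.testing.assert_series_equal(result, pd.Series([4, 6]))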
class TestSeries_ops(TestCase):
def test_series_operators_int(self):
"""Verifies using all various Series arithmetic binary operators on two integer Series with default indexes"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.int32),
np.ones(n + 3, dtype=np.int32),
np.random.randint(-5, 5, n + 7)]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data_left, data_right in combinations_with_replacement(data_to_test, 2):
# integers to negative powers are not allowed
if (operator == '**' and np.any(data_right < 0)):
data_right = np.abs(data_right)
with self.subTest(left=data_left, right=data_right, operator=operator):
S1 = pd.Series(data_left)
S2 = pd.Series(data_right)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
def test_series_operators_int_scalar(self):
"""Verifies using all various Series arithmetic binary operators
on an integer Series with default index and a scalar value"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.int32),
np.ones(n + 3, dtype=np.int32),
np.random.randint(-5, 5, n + 7)]
scalar_values = [1, -1, 0, 3, 7, -5]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data, scalar, swap_operands in product(data_to_test, scalar_values, (False, True)):
S = pd.Series(data)
left, right = (S, scalar) if swap_operands else (scalar, S)
# integers to negative powers are not allowed
if (operator == '**' and np.any(right < 0)):
right = abs(right)
with self.subTest(left=left, right=right, operator=operator):
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(left, right), test_impl(left, right), check_dtype=False)
def test_series_operators_float(self):
"""Verifies using all various Series arithmetic binary operators on two float Series with default indexes"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.float32),
np.ones(n + 3, dtype=np.float32),
np.random.ranf(n + 7)]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data_left, data_right in combinations_with_replacement(data_to_test, 2):
with self.subTest(left=data_left, right=data_right, operator=operator):
S1 = pd.Series(data_left)
S2 = pd.Series(data_right)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
def test_series_operators_float_scalar(self):
"""Verifies using all various Series arithmetic binary operators
on a float Series with default index and a scalar value"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.float32),
np.ones(n + 3, dtype=np.float32),
np.random.ranf(n + 7)]
scalar_values = [1., -1., 0., -0., 7., -5.]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data, scalar, swap_operands in product(data_to_test, scalar_values, (False, True)):
S = pd.Series(data)
left, right = (S, scalar) if swap_operands else (scalar, S)
with self.subTest(left=left, right=right, operator=operator):
pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar), check_dtype=False)
@skip_numba_jit('Not implemented in new-pipeline yet')
def test_series_operators_inplace(self):
arithmetic_binops = ('+=', '-=', '*=', '/=', '//=', '%=', '**=')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = self.jit(test_impl)
# TODO: extend to test arithmetic operations between numeric Series of different dtypes
n = 11
A1 = pd.Series(np.arange(1, n, dtype=np.float64), name='A')
A2 = A1.copy(deep=True)
B = pd.Series(np.ones(n - 1), name='B')
hpat_func(A1, B)
test_impl(A2, B)
pd.testing.assert_series_equal(A1, A2)
@skip_numba_jit('Not implemented in new-pipeline yet')
def test_series_operators_inplace_scalar(self):
arithmetic_binops = ('+=', '-=', '*=', '/=', '//=', '%=', '**=')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = self.jit(test_impl)
# TODO: extend to test arithmetic operations between numeric Series of different dtypes
n = 11
S1 = pd.Series(np.arange(1, n, dtype=np.float64), name='A')
S2 = S1.copy(deep=True)
hpat_func(S1, 1)
test_impl(S2, 1)
pd.testing.assert_series_equal(S1, S2)
@skip_numba_jit('operator.neg for SeriesType is not implemented in yet')
def test_series_operator_neg(self):
def test_impl(A):
return -A
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_operators_comp_numeric(self):
"""Verifies using all various Series comparison binary operators on two integer Series with various indexes"""
n = 11
data_left = [1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0]
data_right = [3, 2, -2, 1, 4, 1, -5, 6, 6, 3, -1]
dtype_to_index = {'None': None,
'int': np.arange(n, dtype='int'),
'float': np.arange(n, dtype='float'),
'string': ['aa', 'aa', '', '', 'b', 'b', 'cccc', None, 'dd', 'ddd', None]}
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for dtype, index_data in dtype_to_index.items():
with self.subTest(operator=operator, index_dtype=dtype, index=index_data):
A = pd.Series(data_left, index=index_data)
B = pd.Series(data_right, index=index_data)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_operators_comp_numeric_scalar(self):
"""Verifies using all various Series comparison binary operators on an integer Series and scalar values"""
S = pd.Series([1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0])
scalar_values = [2, 2.0, -3, np.inf, -np.inf, np.PZERO, np.NZERO]
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for scalar in scalar_values:
with self.subTest(left=S, right=scalar, operator=operator):
pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar))
def test_series_operators_comp_str_scalar(self):
"""Verifies using all various Series comparison binary operators on an string Series and scalar values"""
S = pd.Series(['aa', 'aa', '', '', 'b', 'b', 'cccc', None, 'dd', 'ddd', None])
scalar_values = ['a', 'aa', 'ab', 'ba', '']
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for scalar in scalar_values:
with self.subTest(left=S, right=scalar, operator=operator):
pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar))
@skip_numba_jit
def test_series_operators_inplace_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = self.jit(test_impl)
n = 11
A = np.arange(n)**2.0  # TODO: use 2 to test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
@skip_numba_jit('Functionally test passes, but in old-style it checked fusion of parfors.\n'
'TODO: implement the same checks in new-pipeline')
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
# self.assertEqual(count_parfor_REPs(), 1)
@skip_numba_jit('Functionally test passes, but in old-style it checked fusion of parfors.\n'
'TODO: implement the same checks in new-pipeline')
def test_series_fusion2(self):
def test_impl(A, B):
S = B + 2
if A.iat[0] == 0:
S = A + 1
return S + B
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
# self.assertEqual(count_parfor_REPs(), 3)
def test_series_operator_add_numeric_scalar(self):
"""Verifies Series.operator.add implementation for numeric series and scalar second operand"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtype_to_index = {'None': None,
'int': np.arange(n, dtype='int'),
'float': np.arange(n, dtype='float'),
'string': ['aa', 'aa', 'b', 'b', 'cccc', 'dd', 'ddd']}
int_scalar = 24
for dtype, index_data in dtype_to_index.items():
with self.subTest(index_dtype=dtype, index=index_data):
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n, dtype=np.int64), index=index_data)
else:
A = pd.Series(np.arange(n), index=index_data)
result = hpat_func(A, int_scalar)
result_ref = test_impl(A, int_scalar)
pd.testing.assert_series_equal(result, result_ref, check_dtype=False, check_names=False)
float_scalar = 24.0
for dtype, index_data in dtype_to_index.items():
with self.subTest(index_dtype=dtype, index=index_data):
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n, dtype=np.int64), index=index_data)
else:
A = pd.Series(np.arange(n), index=index_data)
ref_result = test_impl(A, float_scalar)
result = hpat_func(A, float_scalar)
pd.testing.assert_series_equal(result, ref_result, check_dtype=False, check_names=False)
def test_series_operator_add_numeric_same_index_default(self):
"""Verifies implementation of Series.operator.add between two numeric Series
with default indexes and same size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), dtype=dtype_left)
B = pd.Series(np.arange(n)**2, dtype=dtype_right)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
@skip_numba_jit
def test_series_operator_add_numeric_same_index_numeric(self):
"""Verifies implementation of Series.operator.add between two numeric Series
with the same numeric indexes of different dtypes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left))
B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
def test_series_operator_add_numeric_same_index_numeric_fixme(self):
""" Same as test_series_operator_add_same_index_numeric but with w/a for the problem.
Can be deleted when the latter is fixed """
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
index_dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(index_dtypes_to_test, 2):
# FIXME: skip the sub-test if one of the dtypes is float and the other is integer
if not (np.issubdtype(dtype_left, np.integer) and np.issubdtype(dtype_right, np.integer)
or np.issubdtype(dtype_left, np.floating) and np.issubdtype(dtype_right, np.floating)):
continue
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left))
B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
def test_series_operator_add_numeric_same_index_str(self):
"""Verifies implementation of Series.operator.add between two numeric Series with the same string indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
A = pd.Series(np.arange(n), index=['a', 'c', 'e', 'c', 'b', 'a', 'o'])
B = pd.Series(np.arange(n)**2, index=['a', 'c', 'e', 'c', 'b', 'a', 'o'])
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_align_index_int(self):
"""Verifies implementation of Series.operator.add between two numeric Series with non-equal integer indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 11
index_A = [0, 1, 1, 2, 3, 3, 3, 4, 6, 8, 9]
index_B = [0, 1, 1, 3, 4, 4, 5, 5, 6, 6, 9]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
A = pd.Series(np.arange(n), index=index_A)
B = pd.Series(np.arange(n)**2, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_align_index_str(self):
"""Verifies implementation of Series.operator.add between two numeric Series with non-equal string indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 11
index_A = ['', '', 'aa', 'aa', 'ae', 'ae', 'b', 'ccc', 'cccc', 'oo', 's']
index_B = ['', '', 'aa', 'aa', 'cc', 'cccc', 'e', 'f', 'h', 'oo', 's']
np.random.shuffle(index_A)
np.random.shuffle(index_B)
A = pd.Series(np.arange(n), index=index_A)
B = pd.Series(np.arange(n)**2, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
@skip_numba_jit("TODO: fix Series.sort_values to handle both None and '' in string series")
def test_series_operator_add_numeric_align_index_str_fixme(self):
"""Same as test_series_operator_add_align_index_str but with None values in string indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 11
index_A = ['', '', 'aa', 'aa', 'ae', 'b', 'ccc', 'cccc', 'oo', None, None]
index_B = ['', '', 'aa', 'aa', 'cccc', 'f', 'h', 'oo', 's', None, None]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
A = pd.Series(np.arange(n), index=index_A)
B = pd.Series(np.arange(n)**2, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_align_index_other_dtype(self):
"""Verifies implementation of Series.operator.add between two numeric Series
with non-equal integer indexes of different dtypes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
A = pd.Series(np.arange(3*n), index=np.arange(-n, 2*n, 1, dtype=np.int64))
B = pd.Series(np.arange(3*n)**2, index=np.arange(0, 3*n, 1, dtype=np.float64))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_diff_series_sizes(self):
"""Verifies implementation of Series.operator.add between two numeric Series with different sizes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
size_A, size_B = 7, 25
A = pd.Series(np.arange(size_A))
B = pd.Series(np.arange(size_B)**2)
result = hpat_func(A, B)
result_ref = test_impl(A, B)
pd.testing.assert_series_equal(result, result_ref, check_dtype=False, check_names=False)
def test_series_operator_add_align_index_int_capacity(self):
"""Verifies implementation of Series.operator.add and alignment of numeric indexes of large size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 20000
np.random.seed(0)
index1 = np.random.randint(-30, 30, n)
index2 = np.random.randint(-30, 30, n)
A = pd.Series(np.random.ranf(n), index=index1)
B = pd.Series(np.random.ranf(n), index=index2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_align_index_str_capacity(self):
"""Verifies implementation of Series.operator.add and alignment of string indexes of large size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 2000
np.random.seed(0)
valid_ids = ['', 'aaa', 'a', 'b', 'ccc', 'ef', 'ff', 'fff', 'fa', 'dddd']
index1 = [valid_ids[i] for i in np.random.randint(0, len(valid_ids), n)]
index2 = [valid_ids[i] for i in np.random.randint(0, len(valid_ids), n)]
A = pd.Series(np.random.ranf(n), index=index1)
B = pd.Series(np.random.ranf(n), index=index2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_str_same_index_default(self):
"""Verifies implementation of Series.operator.add between two string Series
with default indexes and same size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
A = pd.Series(['a', '', 'ae', 'b', 'cccc', 'oo', None])
B = pd.Series(['b', 'aa', '', 'b', 'o', None, 'oo'])
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_str_align_index_int(self):
"""Verifies implementation of Series.operator.add between two string Series with non-equal integer indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
np.random.seed(0)
index_A = [0, 1, 1, 2, 3, 3, 3, 4, 6, 8, 9]
index_B = [0, 1, 1, 3, 4, 4, 5, 5, 6, 6, 9]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
data = ['', '', 'aa', 'aa', None, 'ae', 'b', 'ccc', 'cccc', None, 'oo']
A = pd.Series(data, index=index_A)
B = pd.Series(data, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_result_name1(self):
"""Verifies name of the Series resulting from appying Series.operator.add to different arguments"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
series_names = ['A', '', None, 'B']
for left_name, right_name in combinations(series_names, 2):
S1 = pd.Series(np.arange(n), name=left_name)
S2 = pd.Series(np.arange(n, 0, -1), name=right_name)
with self.subTest(left_series_name=left_name, right_series_name=right_name):
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
# also verify the case when the second operand is a scalar
scalar = 3.0
with self.subTest(scalar=scalar):
S1 = pd.Series(np.arange(n), name='A')
pd.testing.assert_series_equal(hpat_func(S1, scalar), test_impl(S1, scalar), check_dtype=False)
@unittest.expectedFailure
def test_series_operator_add_result_name2(self):
"""Verifies implementation of Series.operator.add differs from Pandas
in returning unnamed Series when both operands are named Series with the same name"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
S1 = pd.Series(np.arange(n), name='A')
S2 = pd.Series(np.arange(n, 0, -1), name='A')
result = hpat_func(S1, S2)
result_ref = test_impl(S1, S2)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(result, result_ref, check_dtype=False)
@unittest.expectedFailure
def test_series_operator_add_series_dtype_promotion(self):
"""Verifies implementation of Series.operator.add differs from Pandas
in dtype of resulting Series that is fixed to float64"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.array(np.arange(n), dtype=dtype_left))
B = pd.Series(np.array(np.arange(n)**2, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_operator_add_str_scalar(self):
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
series_data = ['a', '', 'ae', 'b', 'cccc', 'oo', None]
S = pd.Series(series_data)
values_to_test = [' ', 'wq', '', '23']
for scalar in values_to_test:
with self.subTest(left=series_data, right=scalar):
result_ref = test_impl(S, scalar)
result = hpat_func(S, scalar)
pd.testing.assert_series_equal(result, result_ref)
with self.subTest(left=scalar, right=series_data):
result_ref = test_impl(scalar, S)
result = hpat_func(scalar, S)
pd.testing.assert_series_equal(result, result_ref)
def test_series_operator_add_str_unsupported(self):
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
series_data = ['a', '', 'ae', 'b', 'cccc', 'oo', None]
S = pd.Series(series_data)
other_operands = [
1,
3.0,
pd.Series(np.arange(n)),
pd.Series([True, False, False, True, False, True, True]),
]
for operand in other_operands:
with self.subTest(right=operand):
with self.assertRaises(TypingError) as raises:
hpat_func(S, operand)
expected_msg = 'Operator add(). Not supported for not-comparable operands.'
self.assertIn(expected_msg, str(raises.exception))
def test_series_operator_mul_str_scalar(self):
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
series_data = ['a', '', 'ae', 'b', ' ', 'cccc', 'oo', None]
S = pd.Series(series_data)
values_to_test = [-1, 0, 2, 5]
for scalar in values_to_test:
with self.subTest(left=series_data, right=scalar):
result_ref = test_impl(S, scalar)
result = hpat_func(S, scalar)
pd.testing.assert_series_equal(result, result_ref)
with self.subTest(left=scalar, right=series_data):
result_ref = test_impl(scalar, S)
result = hpat_func(scalar, S)
pd.testing.assert_series_equal(result, result_ref)
def test_series_operator_mul_str_same_index_default(self):
"""Verifies implementation of Series.operator.add between two string Series
with default indexes and same size"""
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
A = pd.Series(['a', '', 'ae', 'b', 'cccc', 'oo', None])
B = pd.Series([-1, 2, 0, 5, 3, -5, 4])
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_operator_mul_str_align_index_int1(self):
""" Verifies implementation of Series.operator.add between two string Series
with integer indexes containg same unique values (so alignment doesn't produce NaNs) """
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
n = 11
np.random.seed(0)
shuffled_data = np.arange(n, dtype=np.int_)
np.random.shuffle(shuffled_data)
index_A = shuffled_data.copy()
np.random.shuffle(shuffled_data)
index_B = shuffled_data.copy()
str_series_values = ['', '', 'aa', 'aa', None, 'ae', 'b', 'ccc', 'cccc', None, 'oo']
int_series_values = np.random.randint(-5, 5, n)
A = pd.Series(str_series_values, index=index_A)
B = pd.Series(int_series_values, index=index_B)
for swap_operands in (False, True):
if swap_operands:
A, B = B, A
with self.subTest(left=A, right=B):
result = hpat_func(A, B)
result_ref = test_impl(A, B)
pd.testing.assert_series_equal(result, result_ref)
@unittest.expectedFailure # pandas can't calculate this due to adding NaNs to int series during alignment
def test_series_operator_mul_str_align_index_int2(self):
""" Verifies implementation of Series.operator.add between two string Series
with integer indexes that cannot be aligned without NaNs """
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
n = 11
np.random.seed(0)
index_A = [0, 1, 1, 2, 3, 3, 3, 4, 6, 8, 9]
index_B = [0, 1, 1, 3, 4, 4, 5, 5, 6, 6, 9]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
str_series_values = ['', '', 'aa', 'aa', None, 'ae', 'b', 'ccc', 'cccc', None, 'oo']
int_series_values = np.random.randint(-5, 5, n)
A = pd.Series(str_series_values, index=index_A)
B = pd.Series(int_series_values, index=index_B)
for swap_operands in (False, True):
if swap_operands:
A, B = B, A
with self.subTest(left=A, right=B):
result = hpat_func(A, B)
result_ref = test_impl(A, B)
pd.testing.assert_series_equal(result, result_ref)
def test_series_operator_mul_str_unsupported(self):
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
series_data = ['a', '', 'ae', 'b', 'cccc', 'oo', None]
S = pd.Series(series_data)
other_operands = [
'abc',
3.0,
pd.Series(series_data),
pd.Series([True, False, False, True, False, True, True]),
]
for operand in other_operands:
with self.subTest(right=operand):
with self.assertRaises(TypingError) as raises:
hpat_func(S, operand)
expected_msg = 'Operator mul(). Not supported between operands of types:'
self.assertIn(expected_msg, str(raises.exception))
def test_series_operator_lt_index_mismatch1(self):
"""Verifies correct exception is raised when comparing Series with non equal integer indexes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
n = 11
np.random.seed(0)
index1 = np.arange(n)
index2 = np.copy(index1)
np.random.shuffle(index2)
A = pd.Series([1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0], index=index1)
B = pd.Series([3, 2, -2, 1, 4, 1, -5, 6, 6, 3, -1], index=index2)
with self.assertRaises(Exception) as context:
test_impl(A, B)
exception_ref = context.exception
self.assertRaises(type(exception_ref), hpat_func, A, B)
def test_series_operator_lt_index_mismatch2(self):
"""Verifies correct exception is raised when comparing Series of different size with default indexes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
A = pd.Series([1, 2, -1, 3, 4, 2])
B = pd.Series([3, 2, -2, 1, 4, 1, -5, 6, 6, 3, -1])
with self.assertRaises(Exception) as context:
test_impl(A, B)
exception_ref = context.exception
self.assertRaises(type(exception_ref), hpat_func, A, B)
@skip_numba_jit('Numba propagates different exception:\n'
'numba.core.errors.TypingError: Failed in nopython mode pipeline (step: nopython frontend)\n'
'Internal error at <numba.core.typeinfer.IntrinsicCallConstraint ...\n'
'\'Signature\' object is not iterable')
def test_series_operator_lt_index_mismatch3(self):
"""Verifies correct exception is raised when comparing two Series with non-comparable indexes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
S1 = pd.Series([1, 2, -1, 3, 4, 2])
S2 = pd.Series(['a', 'b', '', None, '2', 'ccc'])
import numpy as np
import pandas as pd
from scipy.stats import ttest_ind, chisquare, f_oneway, contingency
from scipy import stats
from ..findings import TTestFindings, DependenceFindings, ChiSquaredFindings, TestResult, FindingsList, AnovaFindings
import math
import itertools
def _anova(data, num_col, group_col, groups):
group_samples = []
for i in groups:
group_samples.append(data[data[group_col] == i][num_col])
test_result = f_oneway(*group_samples)
effect_size = _compute_eta_squared(*group_samples)
return test_result, effect_size
def _compute_eta_squared(*args):
# args are the per-group samples
all_data = np.asarray(list(itertools.chain(*args)))
group_mean = [i.mean() for i in args]
group_mean = np.array(group_mean)
return group_mean.var() / all_data.var()
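# Illustrative sketch, not used by the module: with group samples [1, 2, 3]
# and [4, 5, 6] the group means (2 and 5) explain most of the total variance,
# so eta squared comes out large (roughly 0.77).
def _example_eta_squared():
    es = _compute_eta_squared(np.array([1, 2, 3]), np.array([4, 5, 6]))
    assert abs(es - 0.77) < 0.01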
def _t_test(data, num_col, group_col, group_1=None, group_2=None, **kwargs):
if group_1 is None and group_2 is None:
groups = data[group_col].value_counts()
if len(groups) != 2:
raise ValueError(f"Column {group_col} has more than 2 groups")
else:
group_1 = groups.index[0]
group_2 = groups.index[1]
elif group_1 is None or group_2 is None:
raise ValueError("Please specify both group_1 and group_2")
first_sample = data[data[group_col] == group_1][num_col]
second_sample = data[data[group_col] == group_2][num_col]
test_result = ttest_ind(a = first_sample,
b = second_sample,
**kwargs)
effect_size = _compute_cohen_es(first_sample, second_sample)
return test_result, effect_size
def _compute_cohen_es(sample_1, sample_2):
cohen_es = abs(sample_1.mean() - sample_2.mean()) / sample_1.std()
return cohen_es
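# Illustrative sketch, not used by the module: the means differ by 1 while
# std(sample_1) is about 1.58, so the effect size is roughly 0.63.
def _example_cohen_es():
    s1 = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
    s2 = pd.Series([2.0, 3.0, 4.0, 5.0, 6.0])
    assert abs(_compute_cohen_es(s1, s2) - 0.63) < 0.01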
def _compute_phi_es(chi2, n):
return math.sqrt(chi2 / n)
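# Illustrative sketch, not used by the module: a chi-squared statistic of 4.0
# over 100 observations gives phi = sqrt(4 / 100) = 0.2, which matches the
# default phi_es threshold used for reporting dependence findings below.
def _example_phi_es():
    assert abs(_compute_phi_es(4.0, 100) - 0.2) < 1e-12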
def _chi_squared(data, col_1, expected=None):
# If expected is None, scipy's chisquare assumes all categories are equally likely.
obs = data[col_1].value_counts().values
test_result = chisquare(obs, expected)
effect_size = _compute_phi_es(test_result.statistic, len(data[col_1]))
return test_result, effect_size
def _chi_squared_dependence(data, col_1, col_2, groups_1, groups_2, min_sample):
if groups_1 is None:
filtered, ignored = _filter_sparse_group(data, col_1, min_sample)
if len(filtered) < 2:
raise ValueError(f"Only one group for {col_1}")
groups_1 = filtered
if groups_2 is None:
filtered, ignored = _filter_sparse_group(data, col_2, min_sample)
if len(filtered) < 2:
raise ValueError(f"Only one group for {col_2}")
groups_2 = filtered
# Keep only the rows whose values fall into the retained groups.
mask = data[col_1].isin(groups_1) & data[col_2].isin(groups_2)
group_1 = data.loc[mask, col_1]
group_2 = data.loc[mask, col_2]
vals, count = contingency.crosstab(group_1.values, group_2.values)
test_result = contingency.chi2_contingency(count)
test_result = TestResult(name='chi2 contingency',
statistic=test_result[0],
pvalue=test_result[1],
dof=test_result[2],
expected=test_result[3],
)
effect_size = _compute_phi_es(test_result.statistic, len(data[col_1]))
return test_result, effect_size
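# Illustrative sketch, not used by the module (hypothetical data):
# contingency.crosstab builds the observed count table that chi2_contingency
# then tests for independence.
def _example_crosstab():
    _, count = contingency.crosstab(['a', 'a', 'b'], ['x', 'y', 'y'])
    assert count.tolist() == [[1, 1], [0, 1]]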
def _compare_group(data, col_1, col_2, p_value=0.05, phi_es=0.2, min_sample=20):
groups_1, ignored_1 = _filter_sparse_group(data, col_1, min_sample)
groups_2, ignored_2 = _filter_sparse_group(data, col_2, min_sample)
if len(groups_1) <= 1 or len(groups_2) <= 1:
pass
else:
test_result, effect_size = _chi_squared_dependence(data, col_1, col_2, groups_1, groups_2, min_sample)
if test_result.pvalue <= p_value and effect_size >= phi_es:
return DependenceFindings(data=data,
col_1=col_1,
col_2=col_2,
groups_1=groups_1,
groups_2=groups_2,
test_result=test_result
)
return None
def _compare_mean(data, num_col, group_col, *, cohen_es=0.2, eta=0.06, p_value=0.05, min_sample=20):
groups, ignored = _filter_sparse_group(data, group_col, min_sample)
if not ignored.empty:
print(f"Ignoring groups {list(ignored)} when comparing {num_col} and {group_col}")
if len(groups) == 1:
print(f"Skipping comparing {num_col} and {group_col}, only one group available")
elif len(groups) == 2:
group_1 = groups[0]
group_2 = groups[1]
test_result, effect_size = _t_test(data, num_col, group_col, group_1, group_2)
if test_result.pvalue <= p_value and effect_size >= cohen_es:
return TTestFindings(data=data,
group_col=group_col,
num_col=num_col,
group_1=group_1,
group_2=group_2,
test_result=test_result)
else:
test_result, effect_size = _anova(data, num_col, group_col, groups)
if test_result.pvalue <= p_value and effect_size >= eta:
return AnovaFindings(data=data,
group_col=group_col,
groups=groups,
num_col=num_col,
test_result=test_result
)
return None
def _filter_sparse_group(data, group_col, min_sample):
group_count = data[group_col].value_counts()
ignored = group_count[(group_count < min_sample)]
result = group_count.drop(ignored.index)
return result.index, ignored.index
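# Illustrative sketch, not used by the module (hypothetical data): groups with
# fewer than min_sample rows end up in the ignored index.
def _example_filter_sparse_group():
    df = pd.DataFrame({"g": ["a"] * 30 + ["b"] * 5})
    kept, ignored = _filter_sparse_group(df, "g", min_sample=20)
    assert list(kept) == ["a"] and list(ignored) == ["b"]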
def _auto_detect(data,
num_col,
cat_col,
cohen_es=0.2,
eta=0.06,
phi_es=0.2,
p_value=0.05,
min_sample=20,
ignore_list=None):
findings_list = []
ignore_list = [] if ignore_list is None else ignore_list
# Compare mean
for n_col, c_col in itertools.product(num_col, cat_col):
# TODO: Check if this is inefficient.
if ((n_col, c_col) in ignore_list) or ((c_col, n_col) in ignore_list):
continue
else:
findings = _compare_mean(data, n_col, c_col, cohen_es=cohen_es, eta=eta, p_value=p_value, min_sample=min_sample)
if findings is not None:
findings_list.append(findings)
# Compare dependency of two cat_col
for col_1, col_2 in itertools.combinations(cat_col, r=2):
# TODO: Check if this is inefficient.
if ((col_1, col_2) in ignore_list) or ((col_2, col_1) in ignore_list):
continue
else:
findings = _compare_group(data, col_1, col_2, p_value=p_value, phi_es=phi_es, min_sample=min_sample)
if findings is not None:
findings_list.append(findings)
return FindingsList(findings_list)
def _diff_group(data, group_col, num_col):
df_group = data.groupby(group_col)[num_col].mean().T
result = pd.DataFrame(index=df_group.index)
for i in itertools.combinations(df_group.columns, 2):
result[f"{i[0]} - {i[1]}"] = df_group[i[0]] - df_group[i[1]]
return result
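# Illustrative sketch, not used by the module (hypothetical data): _diff_group
# builds one column per pair of groups, holding the difference of the
# per-group means of each numeric column.
def _example_diff_group():
    df = pd.DataFrame({"g": ["x", "x", "y", "y"], "v": [1.0, 3.0, 5.0, 7.0]})
    result = _diff_group(df, "g", ["v"])
    assert result.loc["v", "x - y"] == -4.0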
def _diff(data, *args):
# TODO: Check the len of each args sample.
result = pd.DataFrame(index=pd.RangeIndex(len(args[0])))
# Pairwise element-wise differences between the given samples.
for (i, a), (j, b) in itertools.combinations(enumerate(args), 2):
    result[f"{i} - {j}"] = np.asarray(a) - np.asarray(b)
return result
def _t_test_group(data, group_col, num_col, **kwargs):
test_result = dict()
for i in itertools.combinations(data[group_col].value_counts().index, r=2):
test_result[f"{i[0]} vs {i[1]}"] = ttest_ind(a = df[df[group_col] == i[0]][num_col],
b = df[df[group_col] == i[1]][num_col],
**kwargs)
return test_result
def _locate_outlier_zscore(data, columns, zscore_threshold, any=True, exclude=False):
'''
Locate outliers from numerical columns.
Arguments:
data: pandas DataFrame
columns: A list of column's names for checking outliers. Must be numerical columns
zscore_threshold: Threshold for classifying outliers.
any: If True, classify the data point as outlier if value from one of the column is a outlier.
exclude: If True, return non-outliers. If False, return outliers.
Returns a pandas DataFrame, or a (DataFrame, outlier_range) tuple when outliers are returned.
'''
mean = data[columns].mean(axis=0)
std = data[columns].std(axis=0)
lower_bound = (mean - std * zscore_threshold).rename("Lower_bound")
upper_bound = (mean + std * zscore_threshold).rename("Upper_bound")
outlier_range = pd.concat([lower_bound, upper_bound], axis=1)
# TODO: Make this more efficient
# The bounds above roughly match the z-score masks below; they differ slightly
# because .std() uses ddof=1 while stats.zscore defaults to ddof=0.
mask_include = np.abs(stats.zscore(data[columns])) > zscore_threshold
mask_exclude = np.abs(stats.zscore(data[columns])) < zscore_threshold
if any:
if exclude:
return data[mask_exclude.any(axis=1)]
else:
data = data[mask_include.any(axis=1)]
outlier_field = pd.DataFrame(mask_include, columns=columns)
outlier_field = outlier_field.apply(lambda x: x.replace(True, x.name).replace(False, ""))
outlier_field = outlier_field.apply(lambda x: x.str.cat(sep=''), axis=1)
outlier_field = outlier_field.replace("", np.nan).dropna()
outlier_field.rename("Outlier_field", inplace=True)
assert data.index.equals(outlier_field.index)
return (pd.concat([data, outlier_field], axis=1), outlier_range)
else:
if exclude:
return data[mask_exclude.all(axis=1)]
else:
data = data[mask_include.all(axis=1)]
outlier_field = pd.DataFrame(mask_include, columns=columns)
"""
Tests that rely on a server running
"""
import base64
import json
import datetime
import os
from unittest import mock
import pytest
from heavydb import connect, ProgrammingError, DatabaseError
from heavydb.cursor import Cursor
from heavydb._parsers import Description, ColumnDetails
from heavydb.thrift.ttypes import TDBException
from heavydb.common.ttypes import TDatumType
import geopandas as gpd
import pandas as pd
import numpy as np
import pyarrow as pa
from pandas.api.types import is_object_dtype, is_categorical_dtype
import pandas.testing as tm
import shapely
from shapely.geometry import Point, LineString, Polygon, MultiPolygon
import textwrap
from .conftest import no_gpu
from .data import dashboard_metadata
heavydb_host = os.environ.get('HEAVYDB_HOST', 'localhost')
# XXX: Make it hashable to silence warnings; see if this can be done upstream
# This isn't a huge deal, but our testing context mangers for asserting
# exceptions need hashability
TDBException.__hash__ = id
def _cursor2df(cursor):
col_types = {c.name: c.type_code for c in cursor.description}
has_geodata = {
k: v
in [
TDatumType.POINT,
TDatumType.LINESTRING,
TDatumType.POLYGON,
TDatumType.MULTIPOLYGON,
]
for k, v in col_types.items()
}
col_names = list(col_types.keys())
df_class = gpd.GeoDataFrame if any(has_geodata.values()) else pd.DataFrame
df = df_class(cursor.fetchall(), columns=col_names)
for c, _has_geodata in has_geodata.items():
if _has_geodata:
df.loc[:, c] = df.loc[:, c].apply(shapely.wkt.loads)
return df
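# Illustrative sketch only, not an actual test: it assumes a live connection
# `con` and the `stocks` table created by the tests below. _cursor2df turns
# the cursor rows into a (Geo)DataFrame keyed by the cursor description,
# parsing WKT for any geo columns.
def _example_cursor2df(con):
    cur = con.execute("select qty, price from stocks")
    df = _cursor2df(cur)
    assert list(df.columns) == ['qty', 'price']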
@pytest.mark.usefixtures("mapd_server")
class TestIntegration:
def test_connect_binary(self):
con = connect(
user="admin",
password='<PASSWORD>',
host=heavydb_host,
port=6274,
protocol='binary',
dbname='omnisci',
)
assert con is not None
def test_connect_http(self):
con = connect(
user="admin",
password='<PASSWORD>',
host=heavydb_host,
port=6278,
protocol='http',
dbname='omnisci',
)
assert con is not None
def test_connect_uri(self):
uri = (
'heavydb://admin:HyperInteractive@{0}:6274/omnisci?'
'protocol=binary'.format(heavydb_host)
)
con = connect(uri=uri)
assert con._user == 'admin'
assert con._password == '<PASSWORD>'
assert con._host == heavydb_host
assert con._port == 6274
assert con._dbname == 'omnisci'
assert con._protocol == 'binary'
def test_connect_uri_and_others_raises(self):
uri = (
'heavydb://admin:HyperInteractive@{0}:6274/heavyai?'
'protocol=binary'.format(heavydb_host)
)
with pytest.raises(TypeError):
connect(username='heavydb', uri=uri)
def test_invalid_sql(self, con):
with pytest.raises(ProgrammingError) as r:
con.cursor().execute("this is invalid;")
return r.match("SQL Error:")
def test_nonexistant_table(self, con):
with pytest.raises(DatabaseError) as r:
con.cursor().execute("select it from fake_table;")
r.match("Table 'FAKE_TABLE' does not exist|Object 'fake_table' not")
def test_connection_execute(self, con):
result = con.execute("drop table if exists FOO;")
result = con.execute("create table FOO (a int);")
assert isinstance(result, Cursor)
con.execute("drop table if exists FOO;")
def test_select_sets_description(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c.execute("select * from stocks")
expected = [
Description('date_', 6, None, None, None, None, True),
Description('trans', 6, None, None, None, None, True),
Description('symbol', 6, None, None, None, None, True),
Description('qty', 1, None, None, None, None, True),
Description('price', 3, None, None, None, None, True),
Description('vol', 3, None, None, None, None, True),
]
assert c.description == expected
c.execute('drop table if exists stocks;')
def test_select_parametrized(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c.execute(
'select symbol, qty from stocks where symbol = :symbol',
{'symbol': 'GOOG'},
)
result = list(c)
expected = [
('GOOG', 100),
] # noqa
assert result == expected
c.execute('drop table if exists stocks;')
def test_executemany_parametrized(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
parameters = [{'symbol': 'GOOG'}, {'symbol': "RHAT"}]
expected = [[('GOOG', 100)], [('RHAT', 100)]]
query = 'select symbol, qty from stocks where symbol = :symbol'
c = con.cursor()
result = c.executemany(query, parameters)
assert result == expected
c.execute('drop table if exists stocks;')
def test_executemany_parametrized_insert(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c = con.cursor()
c.execute("drop table if exists stocks2;")
# Create table
c.execute('CREATE TABLE stocks2 (symbol text, qty int);')
params = [{"symbol": "GOOG", "qty": 10}, {"symbol": "AAPL", "qty": 20}]
query = "INSERT INTO stocks2 VALUES (:symbol, :qty);"
result = c.executemany(query, params)
assert result == [[], []] # TODO: not sure if this is standard
c.execute("drop table stocks2;")
c.execute('drop table if exists stocks;')
@pytest.mark.parametrize(
'query, parameters',
[
('select qty, price from stocks', None),
('select qty, price from stocks where qty=:qty', {'qty': 100}),
],
)
def test_select_ipc_parametrized(self, con, query, parameters):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
result = con.select_ipc(query, parameters=parameters)
expected = pd.DataFrame(
{
"qty": np.array([100, 100], dtype=np.int32),
"price": np.array(
[35.13999938964844, 12.140000343322754], dtype=np.float32
),
}
)[['qty', 'price']]
tm.assert_frame_equal(result, expected)
c.execute('drop table if exists stocks;')
def test_select_ipc_first_n(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
result = con.select_ipc("select * from stocks", first_n=1)
assert len(result) == 1
c.execute('drop table if exists stocks;')
@pytest.mark.parametrize(
'query, parameters',
[
('select qty, price from stocks', None),
('select qty, price from stocks where qty=:qty', {'qty': 100}),
],
)
@pytest.mark.skipif(no_gpu(), reason="No GPU available")
def test_select_ipc_gpu(self, con, query, parameters):
from cudf.core.dataframe import DataFrame
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
result = con.select_ipc_gpu("select qty, price from stocks")
assert isinstance(result, DataFrame)
dtypes = dict(qty=np.int32, price=np.float32)
expected = pd.DataFrame(
[[100, 35.14], [100, 12.14]], columns=['qty', 'price']
).astype(dtypes)
result = result.to_pandas()[['qty', 'price']] # column order
pd.testing.assert_frame_equal(result, expected)
c.execute('drop table if exists stocks;')
@pytest.mark.skipif(no_gpu(), reason="No GPU available")
def test_select_text_ipc_gpu(self, con):
from cudf.core.dataframe import DataFrame
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
symbols = set(['GOOG', 'RHAT', 'IBM', 'NVDA'])
for i, sym in enumerate(symbols):
stmt = "INSERT INTO stocks VALUES ('2006-01-05_{}','BUY','{}',{},35.{},{}.1);".format( # noqa
i, sym, i, i, i
) # noqa
# insert twice so we can test
# that duplicated text values
# are deserialized properly
c.execute(stmt)
c.execute(stmt)
result = con.select_ipc_gpu(
"select trans, symbol, qty, price from stocks"
) # noqa
assert isinstance(result, DataFrame)
assert len(result) == 8
assert set(result['trans'].to_pandas()) == set(["BUY"])
assert set(result['symbol'].to_pandas()) == symbols
c.execute('drop table if exists stocks;')
@pytest.mark.skipif(no_gpu(), reason="No GPU available")
def test_select_gpu_first_n(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
result = con.select_ipc_gpu("select * from stocks", first_n=1)
assert len(result) == 1
c.execute('drop table if exists stocks;')
def test_fetchone(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c.execute("select symbol, qty from stocks")
result = c.fetchone()
expected = ('RHAT', 100)
assert result == expected
c.execute('drop table if exists stocks;')
def test_fetchmany(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c.execute("select symbol, qty from stocks")
result = c.fetchmany()
expected = [('RHAT', 100)]
assert result == expected
c.execute("select symbol, qty from stocks")
result = c.fetchmany(size=10)
expected = [('RHAT', 100), ('GOOG', 100)]
assert result == expected
c.execute('drop table if exists stocks;')
def test_select_dates(self, con):
c = con.cursor()
c.execute('drop table if exists dates;')
c.execute(
'create table dates (date_ DATE, datetime_ TIMESTAMP, '
'time_ TIME);'
)
i1 = (
"INSERT INTO dates VALUES ('2006-01-05','2006-01-01T12:00:00',"
"'12:00:00');"
)
i2 = (
"INSERT INTO dates VALUES ('1901-12-14','1901-12-13T20:45:53',"
"'23:59:00');"
)
c.execute(i1)
c.execute(i2)
result = list(c.execute("select * from dates"))
expected = [
(
datetime.date(2006, 1, 5),
datetime.datetime(2006, 1, 1, 12),
datetime.time(12),
),
(
datetime.date(1901, 12, 14),
datetime.datetime(1901, 12, 13, 20, 45, 53),
datetime.time(23, 59),
),
]
assert result == expected
c.execute('drop table if exists dates;')
class TestOptionalImports:
def test_select_gpu(self, con):
with mock.patch.dict(
"sys.modules", {"cudf": None, "cudf.core.dataframe": None}
):
with pytest.raises(ImportError) as m:
con.select_ipc_gpu("select * from foo;")
assert m.match("The 'cudf' package is required")
class TestExtras:
def test_get_tables(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
result = con.get_tables()
assert isinstance(result, list)
assert 'stocks' in result
c.execute('drop table if exists stocks;')
def test_get_table_details(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float, '
'exchanges TEXT [] ENCODING DICT(32));'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1,{'NYSE', 'NASDAQ', 'AMEX'});" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2,{'NYSE', 'NASDAQ'});" # noqa
c.execute(i1)
c.execute(i2)
result = con.get_table_details('stocks')
expected = [
ColumnDetails(
name='date_',
type='STR',
nullable=True,
precision=0,
scale=0,
comp_param=32,
encoding='DICT',
is_array=False,
),
ColumnDetails(
name='trans',
type='STR',
nullable=True,
precision=0,
scale=0,
comp_param=32,
encoding='DICT',
is_array=False,
),
ColumnDetails(
name='symbol',
type='STR',
nullable=True,
precision=0,
scale=0,
comp_param=32,
encoding='DICT',
is_array=False,
),
ColumnDetails(
name='qty',
type='INT',
nullable=True,
precision=0,
scale=0,
comp_param=0,
encoding='NONE',
is_array=False,
),
ColumnDetails(
name='price',
type='FLOAT',
nullable=True,
precision=0,
scale=0,
comp_param=0,
encoding='NONE',
is_array=False,
),
ColumnDetails(
name='vol',
type='FLOAT',
nullable=True,
precision=0,
scale=0,
comp_param=0,
encoding='NONE',
is_array=False,
),
ColumnDetails(
name='exchanges',
type='STR',
nullable=True,
precision=0,
scale=0,
comp_param=32,
encoding='DICT',
is_array=True,
),
]
assert result == expected
c.execute('drop table if exists stocks;')
class TestLoaders:
@staticmethod
def check_empty_insert(result, expected):
assert len(result) == 3
assert expected[0][0] == result[0][0]
assert expected[0][2] == result[0][2]
assert abs(expected[0][1] - result[0][1]) < 1e-7 # floating point
def test_load_empty_table(self, con):
con.execute("drop table if exists baz;")
con.execute("create table baz (a int, b float, c text);")
data = [(1, 1.1, 'a'), (2, 2.2, '2'), (3, 3.3, '3')]
con.load_table("baz", data)
result = sorted(con.execute("select * from baz"))
self.check_empty_insert(result, data)
con.execute("drop table if exists baz;")
def test_load_empty_table_pandas(self, con):
con.execute("drop table if exists baz;")
con.execute("create table baz (a int, b float, c text);")
data = [(1, 1.1, 'a'), (2, 2.2, '2'), (3, 3.3, '3')]
df = pd.DataFrame(data, columns=list('abc'))
con.load_table("baz", df, method='columnar')
result = sorted(con.execute("select * from baz"))
self.check_empty_insert(result, data)
con.execute("drop table if exists baz;")
def test_load_empty_table_arrow(self, con):
con.execute("drop table if exists baz;")
con.execute("create table baz (a int, b float, c text);")
data = [(1, 1.1, 'a'), (2, 2.2, '2'), (3, 3.3, '3')]
df = pd.DataFrame(data, columns=list('abc')).astype(
{'a': 'int32', 'b': 'float32'}
)
table = pa.Table.from_pandas(df, preserve_index=False)
con.load_table("baz", table, method='arrow')
result = sorted(con.execute("select * from baz"))
self.check_empty_insert(result, data)
con.execute("drop table if exists baz;")
@pytest.mark.parametrize(
'df, table_fields',
[
pytest.param(
pd.DataFrame(
{
"a": [1, 2, 3],
"b": [1.1, 2.2, 3.3],
"c": ['a', '2', '3'],
},
),
'a int, b float, c text',
id='scalar_values',
),
pytest.param(
pd.DataFrame(
{
"a": [
np.datetime64('2010-01-01 01:01:01.001001001'),
np.datetime64('2011-01-01 01:01:01.001001001'),
np.datetime64('2012-01-01 01:01:01.001001001'),
],
},
),
'a TIMESTAMP(9)',
id='scalar_datetime_nanoseconds',
),
pytest.param(
pd.DataFrame(
{
"a": [
datetime.datetime.fromtimestamp(
float(1600443582510) / 1e3
),
datetime.datetime.fromtimestamp(
float(1600443582510) / 1e3
),
datetime.datetime.fromtimestamp(
float(1600443582510) / 1e3
),
],
},
),
'a TIMESTAMP(3)',
id='scalar_datetime_ms',
),
pytest.param(
pd.DataFrame(
{
"a": [
datetime.datetime.fromtimestamp(
float(1600443582510) / 1e6
),
datetime.datetime.fromtimestamp(
float(1600443582510) / 1e6
),
datetime.datetime.fromtimestamp(
float(1600443582510) / 1e6
),
],
},
),
'a TIMESTAMP(6)',
id='scalar_datetime_us',
),
pytest.param(
pd.DataFrame(
[
{'ary': [2, 3, 4]},
{'ary': [4444]},
{'ary': []},
{'ary': None},
{'ary': [2, 3, 4]},
]
),
'ary INT[]',
id='array_values',
),
pytest.param(
pd.DataFrame(
[
{'ary': [2, 3, 4], 'strtest': 'teststr'},
{'ary': None, 'strtest': 'teststr'},
{'ary': [4444], 'strtest': 'teststr'},
{'ary': [], 'strtest': 'teststr'},
{'ary': [2, 3, 4], 'strtest': 'teststr'},
]
),
'ary INT[], strtest TEXT',
id='mix_scalar_array_values_with_none_and_empty_list',
),
pytest.param(
gpd.GeoDataFrame(
{
'a': [Point(0, 0), Point(1, 1)],
'b': [
LineString([(2, 0), (2, 4), (3, 4)]),
LineString([(0, 0), (1, 1)]),
],
'c': [
Polygon([(0, 0), (1, 0), (0, 1), (0, 0)]),
Polygon([(0, 0), (4, 0), (4, 4), (0, 4), (0, 0)]),
],
'd': [
MultiPolygon(
[
Polygon([(0, 0), (1, 0), (0, 1), (0, 0)]),
Polygon(
[
(0, 0),
(4, 0),
(4, 4),
(0, 4),
(0, 0),
]
),
]
),
MultiPolygon(
[
Polygon(
[
(0, 0),
(4, 0),
(4, 4),
(0, 4),
(0, 0),
]
),
Polygon([(0, 0), (1, 0), (0, 1), (0, 0)]),
]
),
],
}
),
'a POINT, b LINESTRING, c POLYGON, d MULTIPOLYGON',
id='geo_values',
),
],
)
def test_load_table_columnar(self, con, tmp_table, df, table_fields):
con.execute("create table {} ({});".format(tmp_table, table_fields))
con.load_table_columnar(tmp_table, df)
result = _cursor2df(con.execute('select * from {}'.format(tmp_table)))
pd.testing.assert_frame_equal(df, result)
def test_load_infer(self, con):
con.execute("drop table if exists baz;")
con.execute("create table baz (a int, b float, c text);")
data = pd.DataFrame(
{
'a': np.array([0, 1], dtype=np.int32),
'b': np.array([1.1, 2.2], dtype=np.float32),
'c': ['a', 'b'],
}
)
con.load_table("baz", data)
con.execute("drop table if exists baz;")
def test_load_infer_bad(self, con):
con.execute("drop table if exists baz;")
con.execute("create table baz (a int, b float, c text);")
with pytest.raises(TypeError):
con.load_table("baz", [], method='thing')
con.execute("drop table if exists baz;")
def test_infer_non_pandas(self, con):
con.execute("drop table if exists baz;")
con.execute("create table baz (a int, b float, c text);")
with pytest.raises(TypeError):
con.load_table("baz", [], method='columnar')
con.execute("drop table if exists baz;")
def test_load_columnar_pandas_all(self, con):
c = con.cursor()
c.execute('drop table if exists all_types;')
create = textwrap.dedent(
'''\
create table all_types (
boolean_ BOOLEAN,
smallint_ SMALLINT,
int_ INT,
bigint_ BIGINT,
float_ FLOAT,
double_ DOUBLE,
varchar_ VARCHAR(40),
text_ TEXT,
time_ TIME,
timestamp_ TIMESTAMP,
date_ DATE
);'''
)
# skipping decimal for now
c.execute(create)
data = pd.DataFrame(
{
"boolean_": [True, False, True, False],
"smallint_": np.array([0, 1, 0, 1], dtype=np.int16),
"int_": np.array([0, 1, 0, 1], dtype=np.int32),
"bigint_": np.array([0, 1, 0, 1], dtype=np.int64),
"float_": np.array([0, 1, 0, 1], dtype=np.float32),
"double_": np.array([0, 1, 0, 1], dtype=np.float64),
"varchar_": ["a", "b", "a", "b"],
"text_": ['a', 'b', 'a', 'b'],
"time_": [
datetime.time(0, 11, 59),
datetime.time(13),
datetime.time(22, 58, 59),
datetime.time(7, 13, 43),
],
"timestamp_": [
pd.Timestamp("2016"),
pd.Timestamp("2017"),
pd.Timestamp(
'2017-11-28 23:55:59.342380', tz='US/Eastern'
),
pd.Timestamp(
'2018-11-28 23:55:59.342380', tz='Asia/Calcutta'
),
],
"date_": [
datetime.date(2016, 1, 1),
datetime.date(2017, 1, 1),
datetime.date(2017, 11, 28),
datetime.date(2018, 11, 28),
],
},
columns=[
'boolean_',
'smallint_',
'int_',
'bigint_',
'float_',
'double_',
'varchar_',
'text_',
'time_',
'timestamp_',
'date_',
],
)
con.load_table_columnar("all_types", data, preserve_index=False)
result = list(c.execute("select * from all_types"))
expected = [
(
1,
0,
0,
0,
0.0,
0.0,
'a',
'a',
datetime.time(0, 11, 59),
datetime.datetime(2016, 1, 1, 0, 0),
datetime.date(2016, 1, 1),
),
(
0,
1,
1,
1,
1.0,
1.0,
'b',
'b',
datetime.time(13, 0),
datetime.datetime(2017, 1, 1, 0, 0),
datetime.date(2017, 1, 1),
),
(
1,
0,
0,
0,
0.0,
0.0,
'a',
'a',
datetime.time(22, 58, 59),
datetime.datetime(2017, 11, 29, 4, 55, 59),
datetime.date(2017, 11, 28),
),
(
0,
1,
1,
1,
1.0,
1.0,
'b',
'b',
datetime.time(7, 13, 43),
datetime.datetime(2018, 11, 28, 18, 25, 59),
datetime.date(2018, 11, 28),
),
]
assert result == expected
c.execute('drop table if exists all_types;')
def test_load_table_columnar_arrow_all(self, con):
c = con.cursor()
c.execute('drop table if exists all_types;')
create = textwrap.dedent(
'''\
create table all_types (
boolean_ BOOLEAN,
smallint_ SMALLINT,
int_ INT,
bigint_ BIGINT,
float_ FLOAT,
double_ DOUBLE,
varchar_ VARCHAR(40),
text_ TEXT,
time_ TIME,
timestamp_ TIMESTAMP,
date_ DATE
);'''
)
# skipping decimal for now
c.execute(create)
names = [
'boolean_',
'smallint_',
'int_',
'bigint_',
'float_',
'double_',
'varchar_',
'text_',
'time_',
'timestamp_',
'date_',
]
columns = [
pa.array([True, False, None], type=pa.bool_()),
pa.array([1, 0, None]).cast(pa.int16()),
pa.array([1, 0, None]).cast(pa.int32()),
pa.array([1, 0, None]),
pa.array([1.0, 1.1, None]).cast(pa.float32()),
pa.array([1.0, 1.1, None]),
# no fixed-width string
pa.array(['a', 'b', None]),
pa.array(['a', 'b', None]),
(pa.array([1, 2, None]).cast(pa.int32()).cast(pa.time32('s'))),
pa.array(
[
datetime.datetime(2016, 1, 1, 12, 12, 12),
datetime.datetime(2017, 1, 1),
None,
]
),
pa.array(
[datetime.date(2016, 1, 1), datetime.date(2017, 1, 1), None]
),
]
table = pa.Table.from_arrays(columns, names=names)
con.load_table_arrow("all_types", table)
c.execute('drop table if exists all_types;')
def test_select_null(self, con):
con.execute("drop table if exists pymapd_test_table;")
con.execute("create table pymapd_test_table (a int);")
con.execute("insert into pymapd_test_table VALUES (1);")
con.execute("insert into pymapd_test_table VALUES (null);")
# the test
c = con.cursor()
result = c.execute("select * from pymapd_test_table")
expected = [(1,), (None,)]
assert result.fetchall() == expected
# cleanup
con.execute("drop table if exists pymapd_test_table;")
@pytest.mark.parametrize(
'df, expected',
[
(
pd.DataFrame(
{
"a": [1, 2],
"b": [1.0, 2.0],
"c": [
datetime.date(2016, 1, 1),
datetime.date(2017, 1, 1),
],
"d": [
np.datetime64("2010-01-01T01:01:01.001001001"),
np.datetime64("2011-01-01T01:01:01.001001001"),
],
}
),
{
'a': {'type': 'BIGINT', 'is_array': False},
'b': {'type': 'DOUBLE', 'is_array': False},
'c': {'type': 'DATE', 'is_array': False},
'd': {
'type': 'TIMESTAMP',
'is_array': False,
'precision': 9,
},
},
),
(
pd.DataFrame(
{
'a': [[1, 2], [1, 2], None, []],
'b': ['A', 'B', 'C', 'D'],
'c': [[1.0, 2.2], [1.0, 2.2], [], None],
'd': [
[
9007199254740991,
9007199254740992,
9007199254740993,
],
[],
None,
[
9007199254740994,
9007199254740995,
9007199254740996,
],
],
}
),
{
'a': {'type': 'BIGINT', 'is_array': True},
'b': {'type': 'STR', 'is_array': False},
'c': {'type': 'DOUBLE', 'is_array': True},
'd': {'type': 'BIGINT', 'is_array': True},
},
),
(
gpd.GeoDataFrame(
{
'a': [Point(0, 0), Point(1, 1)],
'b': [
LineString([(2, 0), (2, 4), (3, 4)]),
LineString([(0, 0), (1, 1)]),
],
'c': [
Polygon([(0, 0), (1, 0), (0, 1), (0, 0)]),
Polygon([(0, 0), (4, 0), (4, 4), (0, 4), (0, 0)]),
],
'd': [
MultiPolygon(
[
Polygon([(0, 0), (1, 0), (0, 1), (0, 0)]),
Polygon(
[
(0, 0),
(4, 0),
(4, 4),
(0, 4),
(0, 0),
]
),
]
),
MultiPolygon(
[
Polygon(
[
(0, 0),
(4, 0),
(4, 4),
(0, 4),
(0, 0),
]
),
Polygon([(0, 0), (1, 0), (0, 1), (0, 0)]),
]
),
],
}
),
{
'a': {'type': 'POINT', 'is_array': True},
'b': {'type': 'LINESTRING', 'is_array': True},
'c': {'type': 'POLYGON', 'is_array': True},
'd': {'type': 'MULTIPOLYGON', 'is_array': True},
},
),
],
)
def test_create_table(self, con, tmp_table, df, expected):
con.create_table(tmp_table, df)
for col in con.get_table_details(tmp_table):
assert expected[col.name]['type'] == col.type
if 'precision' in expected[col.name]:
assert expected[col.name]['precision'] == col.precision
def test_load_table_creates(self, con):
data = pd.DataFrame(
{
"boolean_": [True, False],
"smallint_cast": np.array([0, 1], dtype=np.int8),
"smallint_": np.array([0, 1], dtype=np.int16),
"int_": np.array([0, 1], dtype=np.int32),
"bigint_": np.array([0, 1], dtype=np.int64),
"float_": np.array([0, 1], dtype=np.float32),
"double_": np.array([0, 1], dtype=np.float64),
"varchar_": ["a", "b"],
"text_": ['a', 'b'],
"time_": [datetime.time(0, 11, 59), datetime.time(13)],
"timestamp1_": [pd.Timestamp("2016"), pd.Timestamp("2017")],
"timestamp2_": [
np.datetime64("2010-01-01T01:01:01.001001001"),
np.datetime64("2011-01-01T01:01:01.001001001"),
],
"date_": [
datetime.date(2016, 1, 1),
datetime.date(2017, 1, 1),
],
},
columns=[
'boolean_',
'smallint_',
'int_',
'bigint_',
'float_',
'double_',
'varchar_',
'text_',
'time_',
'timestamp1_',
'timestamp2_',
'date_',
],
)
con.execute("drop table if exists test_load_table_creates;")
con.load_table("test_load_table_creates", data, create=True)
con.execute("drop table if exists test_load_table_creates;")
def test_array_in_result_set(self, con):
# text
con.execute("DROP TABLE IF EXISTS test_lists;")
con.execute(
"CREATE TABLE IF NOT EXISTS test_lists \
(col1 TEXT, col2 TEXT[]);"
)
row = [
("row1", "{hello,goodbye,aloha}"),
("row2", "{hello2,goodbye2,aloha2}"),
]
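        # array columns are written as brace-delimited literals ("{hello,goodbye,aloha}")
        # and come back from the select as Python lists (see `expected` below)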
con.load_table_rowwise("test_lists", row)
ans = con.execute("select * from test_lists").fetchall()
expected = [
('row1', ['hello', 'goodbye', 'aloha']),
('row2', ['hello2', 'goodbye2', 'aloha2']),
]
assert ans == expected
# int
con.execute("DROP TABLE IF EXISTS test_lists;")
con.execute(
"CREATE TABLE IF NOT EXISTS test_lists \
(col1 TEXT, col2 INT[]);"
)
row = [("row1", "{10,20,30}"), ("row2", "{40,50,60}")]
con.load_table_rowwise("test_lists", row)
ans = con.execute("select * from test_lists").fetchall()
expected = [('row1', [10, 20, 30]), ('row2', [40, 50, 60])]
assert ans == expected
# timestamp
con.execute("DROP TABLE IF EXISTS test_lists;")
con.execute(
"CREATE TABLE IF NOT EXISTS test_lists \
(col1 TEXT, col2 TIMESTAMP[], col3 TIMESTAMP(9));"
)
row = [
(
"row1",
"{2019-03-02 00:00:00,2019-03-02 00:00:00,2019-03-02 00:00:00}", # noqa
"2010-01-01T01:01:01.001001001",
),
(
"row2",
"{2019-03-02 00:00:00,2019-03-02 00:00:00,2019-03-02 00:00:00}", # noqa
"2011-01-01T01:01:01.001001001",
),
]
con.load_table_rowwise("test_lists", row)
ans = con.execute("select * from test_lists").fetchall()
expected = [
(
'row1',
[
datetime.datetime(2019, 3, 2, 0, 0),
datetime.datetime(2019, 3, 2, 0, 0),
datetime.datetime(2019, 3, 2, 0, 0),
],
np.datetime64("2010-01-01T01:01:01.001001001"),
),
(
'row2',
[
datetime.datetime(2019, 3, 2, 0, 0),
datetime.datetime(2019, 3, 2, 0, 0),
datetime.datetime(2019, 3, 2, 0, 0),
],
np.datetime64("2011-01-01T01:01:01.001001001"),
),
]
assert ans == expected
# date
con.execute("DROP TABLE IF EXISTS test_lists;")
con.execute(
"CREATE TABLE IF NOT EXISTS test_lists \
(col1 TEXT, col2 DATE[]);"
)
row = [
("row1", "{2019-03-02,2019-03-02,2019-03-02}"),
("row2", "{2019-03-02,2019-03-02,2019-03-02}"),
]
con.load_table_rowwise("test_lists", row)
ans = con.execute("select * from test_lists").fetchall()
expected = [
(
'row1',
[
datetime.date(2019, 3, 2),
datetime.date(2019, 3, 2),
datetime.date(2019, 3, 2),
],
),
(
'row2',
[
datetime.date(2019, 3, 2),
datetime.date(2019, 3, 2),
datetime.date(2019, 3, 2),
],
),
]
assert ans == expected
# time
con.execute("DROP TABLE IF EXISTS test_lists;")
con.execute(
"CREATE TABLE IF NOT EXISTS test_lists \
(col1 TEXT, col2 TIME[]);"
)
row = [
("row1", "{23:59:00,23:59:00,23:59:00}"),
("row2", "{23:59:00,23:59:00,23:59:00}"),
]
con.load_table_rowwise("test_lists", row)
ans = con.execute("select * from test_lists").fetchall()
expected = [
(
'row1',
[
datetime.time(23, 59),
datetime.time(23, 59),
datetime.time(23, 59),
],
),
(
'row2',
[
datetime.time(23, 59),
datetime.time(23, 59),
datetime.time(23, 59),
],
),
]
assert ans == expected
con.execute("DROP TABLE IF EXISTS test_lists;")
def test_upload_pandas_categorical_ipc(self, con):
con.execute("DROP TABLE IF EXISTS test_categorical;")
df = | pd.DataFrame({"A": ["a", "b", "c", "a"]}) | pandas.DataFrame |
import math
import scipy.stats as ss
import numpy as np
import pandas as pd
from collections import Counter
def convert(data, to):
converted = None
if to == 'array':
if isinstance(data, np.ndarray):
converted = data
elif isinstance(data, pd.Series):
converted = data.values
elif isinstance(data, list):
converted = np.array(data)
elif isinstance(data, pd.DataFrame):
            converted = data.values  # DataFrame.as_matrix() was removed in newer pandas
elif to == 'list':
if isinstance(data, list):
converted = data
elif isinstance(data, pd.Series):
converted = data.values.tolist()
elif isinstance(data, np.ndarray):
converted = data.tolist()
    elif to == 'dataframe':
        if isinstance(data, pd.DataFrame):
            converted = data
        elif isinstance(data, np.ndarray):
            converted = pd.DataFrame(data)
        elif isinstance(data, list):
            converted = pd.DataFrame(data)
    if converted is None:
        raise TypeError(
            'cannot handle data conversion of type: {} to {}'.format(type(data), to)
        )
    return converted
# created by <NAME> <EMAIL>
import os
import logging
import re
from datetime import datetime
import math
import copy
import pandas as pd
import numpy as np
import attr
import requests
from sklearn.metrics.pairwise import haversine_distances
from BuildingControlsSimulator.DataClients.DataSpec import EnergyPlusWeather
from BuildingControlsSimulator.DataClients.DataChannel import DataChannel
from BuildingControlsSimulator.Conversions.Conversions import Conversions
logger = logging.getLogger(__name__)
@attr.s(kw_only=True)
class WeatherChannel(DataChannel):
"""Client for weather data."""
epw_path = attr.ib(default=None)
epw_data = attr.ib(factory=dict)
epw_meta = attr.ib(factory=dict)
fill_epw_data = attr.ib(default=None)
# env variables
ep_tmy3_cache_dir = attr.ib()
simulation_epw_dir = attr.ib()
nrel_dev_api_key = attr.ib(default=None)
nrel_dev_email = attr.ib(default=None)
archive_tmy3_dir = attr.ib(default=None)
archive_tmy3_meta = attr.ib(default=None)
archive_tmy3_data_dir = attr.ib(default=None)
# column names
datetime_column = attr.ib(default=EnergyPlusWeather.datetime_column)
epw_columns = attr.ib(default=EnergyPlusWeather.epw_columns)
epw_meta_keys = attr.ib(default=EnergyPlusWeather.epw_meta)
epw_column_map = attr.ib(default=EnergyPlusWeather.output_rename_dict)
def make_epw_file(self, sim_config, datetime_channel, fill_epw_path=None):
"""Generate epw file in local time"""
if fill_epw_path:
if os.path.exists(fill_epw_path):
fill_epw_fname = os.path.basename(fill_epw_path)
else:
ValueError(f"fill_epw_path: {fill_epw_path} does not exist.")
else:
# attempt to get .epw data from NREL
fill_epw_path, fill_epw_fname = self.get_tmy_fill_epw(
sim_config["latitude"], sim_config["longitude"]
)
(fill_epw_data, self.epw_meta, meta_lines,) = self.read_epw(
fill_epw_path,
)
# infer if the years in the epw file are garbage from TMY
# TMY data is only valid for a period of 1 year and then it must
# be wrapped to next year if required for multi-year weather data
# the year supplied in TMY data is not a valid sequential time
# Therefore the year must be overwritten to desired year
if len(fill_epw_data["year"].unique()) > 2 and (
len(fill_epw_data["year"]) < 8762
):
            # it is not possible to have full consecutive data for 3 different
# years and less than 8762 total data points.
# using the mode will handle the case of shifting data points into
# adjacent years
force_year = (
datetime_channel.data[datetime_channel.spec.datetime_column]
.dt.year.mode()
.values[0]
)
fill_epw_data = self.convert_epw_to_internal(
fill_epw_data,
force_year=force_year,
)
# lat/lon time zone is the time zone we localize with
# the datetime_channel timezone is free to be changed later
# self.time_zone = copy.deepcopy(datetime_channel.timezone)
# if self.time_zone:
_hour_offset = (
datetime_channel.timezone.utcoffset(datetime.utcnow()).total_seconds()
/ 3600
)
if self.epw_meta["TZ"] != _hour_offset:
            logger.warning(
                "Time zone inferred from latitude/longitude does not match the time zone in the given epw file."
            )
self.epw_meta["TZ"] = _hour_offset
_epw_path = None
if not fill_epw_data.empty:
_epw_path = os.path.join(
self.simulation_epw_dir,
"NREL_EPLUS" + f"_{sim_config['identifier']}" + f"_{fill_epw_fname}",
)
# fill any missing fields in epw
# need to pass in original dyd datetime column name
epw_data = self.fill_epw(
input_epw_data=self.data,
datetime_channel=datetime_channel,
fill_epw_data=fill_epw_data,
sim_config=sim_config,
)
meta_lines = self.add_epw_data_periods(
epw_data=epw_data,
meta_lines=meta_lines,
sim_config=sim_config,
)
# save to file
self.to_epw(
epw_data=epw_data,
meta_lines=meta_lines,
fpath=_epw_path,
)
self.epw_path = _epw_path
else:
logger.error("failed to retrieve .epw fill data.")
return self.epw_path
def add_epw_data_periods(self, epw_data, meta_lines, sim_config):
# add correct DATA PERIODS reference to metalines
# see https://bigladdersoftware.com/epx/docs/9-4/auxiliary-programs/energyplus-weather-file-epw-data-dictionary.html
_starting_timestamp = min(epw_data[self.datetime_column])
_ending_timestamp = max(epw_data[self.datetime_column])
# these are the fields required in DATA PERIODS
_num_data_periods = 1
_records_per_hour = int(3600 / sim_config["sim_step_size_seconds"])
_data_period_name = "data"
_start_day_of_week = _starting_timestamp.day_name()
_start_day = f"{_starting_timestamp.month}/{_starting_timestamp.day}/{_starting_timestamp.year}"
_end_day = f"{_ending_timestamp.month}/{_ending_timestamp.day}/{_ending_timestamp.year}"
data_periods_idx = None
for idx, _line in enumerate(meta_lines):
if _line.startswith("DATA PERIODS"):
data_periods_idx = idx
data_periods_line = "DATA PERIODS,"
data_periods_line += f"{_num_data_periods},{_records_per_hour},"
data_periods_line += f"{_data_period_name},{_start_day_of_week},"
data_periods_line += f"{_start_day},{_end_day}"
data_periods_line += "\n"
if data_periods_idx:
meta_lines[data_periods_idx] = data_periods_line
else:
meta_lines.append(data_periods_line)
return meta_lines
def read_epw(self, fpath):
"""
        Given the path to a file with data in Energy Plus Weather (EPW) format,
        parse the data into a dataframe.
        EPW data is composed of data from different years.
        EPW files always have 365*24 = 8760 data rows,
        so be careful with the use of leap years.
        Parameters
        ----------
        fpath : str
            path to a file containing data in the EPW format
Returns
-------
data : DataFrame
A pandas dataframe with the columns described in the table
below. For more detailed descriptions of each component, please
consult the EnergyPlus Auxiliary Programs documentation
available at: https://energyplus.net/documentation.
meta : dict
The site metadata available in the file.
meta_epw_lines : list
All lines of meta data.
See Also
--------
pvlib.iotools.read_epw
"""
# read meta data into list of lines, determine n_meta_line
# the last meta data line is marked with "DATA PERIODS"
meta_epw_lines = []
with open(fpath, "r") as f:
for n_meta_line in range(10):
meta_epw_lines.append(f.readline())
if meta_epw_lines[n_meta_line].split(",")[0] == "DATA PERIODS":
break
meta = dict(zip(self.epw_meta_keys, meta_epw_lines[0].rstrip("\n").split(",")))
meta["altitude"] = float(meta["altitude"])
meta["latitude"] = float(meta["latitude"])
meta["longitude"] = float(meta["longitude"])
meta["TZ"] = float(meta["TZ"])
# use starting line determined above
data = pd.read_csv(
fpath, skiprows=n_meta_line, header=0, names=self.epw_columns
)
data = data.astype(
{
"year": "Int16",
"month": "Int8",
"day": "Int8",
"hour": "Int8",
"minute": "Int8",
},
)
return data, meta, meta_epw_lines
def convert_epw_to_internal(self, data, use_datetime=True, force_year=None):
# some EPW files have minutes=60 to represent the end of the hour
# this doesnt actually mean it is the next hour, it should be 0
# see: https://discourse.radiance-online.org/t/ \
# meaning-of-epw-files-minute-field-and-why-is-it-60-in-tmy2- \
# based-epw-and-0-in-tmy3-based-epw/1462/3
if data["minute"].mean() == 60:
data["minute"] = 0
# EPW format uses hour = [1-24], set to [0-23]
data["hour"] = data["hour"] - 1
if force_year:
data["year"] = int(force_year)
# create datetime column in UTC
data[self.datetime_column] = pd.to_datetime(
data[["year", "month", "day", "hour", "minute"]]
.astype(int)
.astype(str)
.apply("-".join, 1),
format="%Y-%m-%d-%H-%M",
)
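        # e.g. year=2018, month=7, day=1, hour=13, minute=0 -> "2018-7-1-13-0",
        # which the "%Y-%m-%d-%H-%M" format parses to Timestamp("2018-07-01 13:00:00")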
# localize and convert to UTC
data[self.datetime_column] = (
data[self.datetime_column]
.dt.tz_localize(int(self.epw_meta["TZ"] * 3600))
.dt.tz_convert(tz="UTC")
)
# reset year, month, day, hour, minute columns
# the year must be forced back to wrap the year after TZ shift
data["year"] = force_year
data["month"] = data[self.datetime_column].dt.month
data["day"] = data[self.datetime_column].dt.day
data["hour"] = data[self.datetime_column].dt.hour
data["minute"] = data[self.datetime_column].dt.minute
data = data.sort_values(
["year", "month", "day", "hour", "minute"], ascending=True
)
if not use_datetime:
data = data.drop(axis="columns", columns=[self.datetime_column])
return data
def get_cdo(self):
raise NotImplementedError
# TODO:
# https://www.ncdc.noaa.gov/cdo-web/webservices/v2#gettingStarted
pass
def get_psm(self, location):
raise NotImplementedError
# TODO:
# url = (
# "https://developer.nrel.gov/api/nsrdb/v2/solar/psm3-tmy-download.csv"
# + f"?wkt=POINT({lon}%20{lat})&names={year}&leap_day={leap_year}"
# + f"&api_key={self.nrel_dev_api_key}&attributes={attributes}"
# + f"&utc={utc}&full_name={name}&email={email}&interval={interval}"
# )
pass
def get_tmy_fill_epw(self, lat, lon):
eplus_github_weather_geojson_url = "https://raw.githubusercontent.com/NREL/EnergyPlus/develop/weather/master.geojson"
# check for cached eplus geojson
# cache is updated daily
cache_name = f"eplus_geojson_cache_{datetime.today().strftime('%Y_%m_%d')}.csv"
if self.archive_tmy3_dir:
cache_path = os.path.join(self.archive_tmy3_dir, cache_name)
if self.archive_tmy3_dir and os.path.exists(cache_path):
logger.info(f"Reading TMY weather geojson from cache: {cache_path}")
df = pd.read_csv(cache_path)
if df.empty:
logger.error("Cached TMY weather geojson is empty.")
else:
logger.info(
f"Downloading TMY weather geojson from: {eplus_github_weather_geojson_url}"
)
df = pd.json_normalize(
pd.read_json(eplus_github_weather_geojson_url).features
)
# parse coordinates column to lat lon in radians for usage
# the geojson coordinates are [lon, lat]
coordinates_col = "geometry.coordinates"
df[["lon", "lat"]] = pd.DataFrame(
df[coordinates_col].to_list(), columns=["lon", "lat"]
)
df = df.drop(axis="columns", columns=[coordinates_col])
df["lat"] = np.radians(df["lat"])
df["lon"] = np.radians(df["lon"])
if self.archive_tmy3_dir and os.path.isdir(self.archive_tmy3_dir):
df.to_csv(cache_path, index=False)
        # convert query point to radians and set dimensionality to (1, 2)
qp = np.radians(np.atleast_2d(np.array([lat, lon])))
dis = haversine_distances(df[["lat", "lon"]].values, qp)
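        # haversine_distances expects [lat, lon] in radians and returns pairwise
        # distances, so dis has shape (n_stations, 1); np.argmin(dis) picks the
        # station nearest to the query point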
# TODO: add finding of TMY3 datasets over TMY of same/similar location
# e.g. for phoenix this method find TMY data while TMY3 data exists but
# has different coordinates
epw_href = df.iloc[np.argmin(dis)]["properties.epw"]
# extract download URL from html link
match = re.search(r'href=[\'"]?([^\'" >]+)', epw_href)
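        # e.g. (hypothetical entry) '<a href=https://.../USA_CA_Sacramento.epw>epw</a>'
        # -> 'https://.../USA_CA_Sacramento.epw'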
fpath = None
if match:
epw_url = match.group(1)
fname = epw_url.split("/")[-1]
if fname:
fpath = os.path.join(self.ep_tmy3_cache_dir, fname)
# if already downloaded return name and path to cache
if not os.path.exists(fpath):
logger.info(f"Downloading TMY weather data from: {epw_url}")
res = requests.get(epw_url, allow_redirects=True)
if res.status_code == 200:
with open(fpath, "wb") as f:
f.write(res.content)
return fpath, fname
def get_archive_tmy3(self, lat, lon):
"""Retrieve TMY3 data from archive based on minimum haversine distance.
        This requires downloading the archive.
See README.md section on NSRDB 1991-2005 Archive Data:
The archived data contains the most recent TMY3 data with the fields
required by the EPW format. Download the archive from:
https://nsrdb.nrel.gov/data-sets/archives.html
Note: The archive is ~3 GB, but only the TMY data
(~300MB compressed, 1.7 GB uncompressed) is required and
the hourly data can be deleted after download.
:param lat: latitude
:type lat: float
:param lon: longitude
:type lon: float
:return: TMY3 data
:rtype: pd.DataFrame
"""
# only need these columns
tmy3_meta = pd.read_csv(
self.archive_tmy3_meta, usecols=["USAF", "Latitude", "Longitude"]
)
# convert query point and all stations all to radians
qp = np.radians(np.atleast_2d(np.array([lat, lon])))
tmy3_meta["Latitude"] = np.radians(tmy3_meta["Latitude"])
tmy3_meta["Longitude"] = np.radians(tmy3_meta["Longitude"])
# compute haversine distance from all stations to query point
dis = haversine_distances(tmy3_meta[["Latitude", "Longitude"]].values, qp)
# station that minimizes distance from query point should be used
usaf_code = tmy3_meta.USAF[np.argmin(dis)]
# read tmy3 data from archive using usaf code
return pd.read_csv(
f"{self.archive_tmy3_data_dir}/{usaf_code}TYA.CSV", skiprows=1
)
def get_psm3_tmy3(self, location):
# TODO: finish implementing
raise NotImplementedError
lat = 43.83452
lon = -99.49218
# attributes to extract (e.g., dhi, ghi, etc.), separated by commas.
attributes = ",".join(
[
"ghi",
"dhi",
"dni",
"surface_pressure",
"wind_direction",
"wind_speed",
"surface_albedo",
]
)
# Choose year of data
names = "tmy"
# local time zone or UTC (confirmed works for TMY3)
utc = "true"
# see: https://developer.nrel.gov/docs/solar/nsrdb/psm3-tmy-download/
# email address is required
url_tmy = (
"https://developer.nrel.gov/api/nsrdb/v2/solar/psm3-tmy-download.csv"
+ f"?wkt=POINT({lon}%20{lat})"
+ f"&names={names}"
+ f"&api_key={self.nrel_dev_api_key}"
+ f"&attributes={attributes}"
+ f"&utc={utc}"
+ f"&email={self.nrel_dev_email}"
)
# Return just the first 2 lines to get metadata:
meta = | pd.read_csv(url_tmy, nrows=1) | pandas.read_csv |
# Module: Preprocess
# Author: <NAME> <<EMAIL>>
# License: MIT
import pandas as pd
import numpy as np
import ipywidgets as wg
from IPython.display import display
from ipywidgets import Layout
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin, clone
from sklearn.impute._base import _BaseImputer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import PowerTransformer
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA
from sklearn.cross_decomposition import PLSRegression
from sklearn.manifold import TSNE
from sklearn.decomposition import IncrementalPCA
from sklearn.preprocessing import KBinsDiscretizer
from pyod.models.knn import KNN
from pyod.models.iforest import IForest
from pyod.models.pca import PCA as PCA_od
from sklearn import cluster
from scipy import stats
from sklearn.ensemble import RandomForestClassifier as rfc
from sklearn.ensemble import RandomForestRegressor as rfr
from lightgbm import LGBMClassifier as lgbmc
from lightgbm import LGBMRegressor as lgbmr
import sys
import gc
from sklearn.pipeline import Pipeline
from sklearn import metrics
from datetime import datetime
import calendar
from sklearn.preprocessing import LabelEncoder
from collections import defaultdict
from typing import Optional, Union
from pycaret.internal.logging import get_logger
from pycaret.internal.utils import infer_ml_usecase
from sklearn.utils.validation import check_is_fitted, check_X_y, check_random_state
from sklearn.utils.validation import _deprecate_positional_args
from sklearn.utils import _safe_indexing
from sklearn.exceptions import NotFittedError
pd.set_option("display.max_columns", 500)
pd.set_option("display.max_rows", 500)
SKLEARN_EMPTY_STEP = "passthrough"
# _____________________________________________________________________________________________________________________________
def str_if_not_null(x):
if pd.isnull(x) or (x is None) or pd.isna(x) or (x is not x):
return x
return str(x)
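# e.g. str_if_not_null(3) -> "3"; str_if_not_null(np.nan) -> nan (missing values pass through)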
def find_id_columns(data, target, numerical_features):
    # sometimes we have an id column in the data set; we will try to find it and drop it if found
len_samples = len(data)
id_columns = []
for i in data.select_dtypes(
include=["object", "int64", "float64", "float32"]
).columns:
col = data[i]
if i not in numerical_features and i != target:
if sum(col.isnull()) == 0:
try:
col = col.astype("int64")
except:
continue
if col.nunique() == len_samples:
# we extract column and sort it
features = col.sort_values()
                    # now we subtract the i-th value from the (i+1)-th (calculating increments)
increments = features.diff()[1:]
# if all increments are 1 (with float tolerance), then the column is ID column
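                    # e.g. values [3, 1, 2, 4] -> sorted [1, 2, 3, 4] -> increments [1, 1, 1]
                    # -> all increments are 1, so the column is treated as an ID column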
if sum(np.abs(increments - 1) < 1e-7) == len_samples - 1:
id_columns.append(i)
return id_columns
class DataTypes_Auto_infer(BaseEstimator, TransformerMixin):
"""
    - This will try to infer data types automatically; an option to override the learned data types is also available.
    - This also automatically deletes duplicate columns (same values or same column name), removes rows where the target variable is null, and
    removes columns and rows where all the records are null
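    Example (sketch; the dataframes and column names are hypothetical):
        dtypes = DataTypes_Auto_infer(target='y', ml_usecase='classification',
                                      categorical_features=['color'],
                                      display_types=False)
        train = dtypes.fit_transform(train_df)
        test = dtypes.transform(test_df)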
"""
def __init__(
self,
target,
ml_usecase,
categorical_features=[],
numerical_features=[],
time_features=[],
features_todrop=[],
id_columns=[],
display_types=True,
float_dtype="float32",
): # nothing to define
"""
User to define the target (y) variable
args:
target: string, name of the target variable
            ml_usecase: string , 'regression' or 'classification'. For now, only supports two class classification
- this is useful in case target variable is an object / string . it will replace the strings with integers
categorical_features: list of categorical features, default None, when None best guess will be used to identify categorical features
numerical_features: list of numerical features, default None, when None best guess will be used to identify numerical features
time_features: list of date/time features, default None, when None best guess will be used to identify date/time features
"""
self.target = target
self.ml_usecase = ml_usecase
self.features_todrop = [str(x) for x in features_todrop]
self.categorical_features = [
x for x in categorical_features if x not in self.features_todrop
]
self.numerical_features = [
x for x in numerical_features if x not in self.features_todrop
]
self.time_features = [x for x in time_features if x not in self.features_todrop]
self.display_types = display_types
self.id_columns = id_columns
self.float_dtype = float_dtype
def fit(self, dataset, y=None): # learning data types of all the columns
"""
Args:
data: accepts a pandas data frame
Returns:
Panda Data Frame
"""
data = dataset.copy()
# also make sure that all the column names are string
data.columns = [str(i) for i in data.columns]
# drop any columns that were asked to drop
data.drop(columns=self.features_todrop, errors="ignore", inplace=True)
        # remove special characters from column names
# data.columns= data.columns.str.replace('[,]','')
        # we will take float as numeric, object as categorical from the beginning
        # for int64, we will check the proportion of unique counts to the total length of the data
        # if the proportion is lower, then it is probably categorical
        # however, the proportion can be lower / distorted due to a smaller denominator (total length / number of samples)
# so we will take the following chart
# 0-50 samples, threshold is 24%
# 50-100 samples, th is 12%
# 50-250 samples , th is 4.8%
# 250-500 samples, th is 2.4%
# 500 and above 2% or belwo
# if there are inf or -inf then replace them with NaN
data.replace([np.inf, -np.inf], np.NaN, inplace=True)
        # in case everything came in as object, we can try converting it to a numeric type
for i in data.select_dtypes(include=["object"]).columns:
try:
data[i] = data[i].astype("int64")
except:
None
for i in (
data.select_dtypes(include=["object"])
.drop(self.target, axis=1, errors="ignore")
.columns
):
try:
data[i] = pd.to_datetime(
data[i], infer_datetime_format=True, utc=False, errors="raise"
)
except:
continue
# if data type is bool or pandas Categorical , convert to categorical
for i in data.select_dtypes(include=["bool", "category"]).columns:
data[i] = data[i].astype("object")
        # with csv, if we have any null in a column that was int, pandas will read it as float.
        # so first we need to convert any such floats that have NaNs and 20 or fewer unique values
for i in data.select_dtypes(include=["float64"]).columns:
data[i] = data[i].astype(self.float_dtype)
# count how many Nas are there
na_count = sum(data[i].isnull())
            # count how many values have a decimal part
count_float = np.nansum(
[False if r.is_integer() else True for r in data[i]]
)
            # total values with a decimal part
count_float = (
count_float - na_count
) # reducing it because we know NaN is counted as a float digit
            # now if there isn't any decimal value, unique levels are at most 20 and there are NAs, then convert it to object
if (count_float == 0) & (data[i].nunique() <= 20) & (na_count > 0):
data[i] = data[i].astype("object")
# should really be an absolute number say 20
# length = len(data.iloc[:,0])
# if length in range(0,51):
# th=.25
# elif length in range(51,101):
# th=.12
# elif length in range(101,251):
# th=.048
# elif length in range(251,501):
# th=.024
# elif length > 500:
# th=.02
        # for int64 columns (excluding target): 20 or fewer unique values -> categorical, otherwise numeric
for i in data.select_dtypes(include=["int64"]).columns:
if i != self.target:
if data[i].nunique() <= 20: # hard coded
data[i] = data[i].apply(str_if_not_null)
else:
data[i] = data[i].astype(self.float_dtype)
        # if a column is float and only has two unique values, it is probably one-hot encoded
        # make it object
for i in data.select_dtypes(include=[self.float_dtype]).columns:
if data[i].nunique() == 2:
data[i] = data[i].apply(str_if_not_null)
# for time & dates
# self.drop_time = [] # for now we are deleting time columns
        # now in case we were given any specific column dtypes in advance, we will override those
for i in self.categorical_features:
try:
data[i] = data[i].apply(str_if_not_null)
except:
data[i] = dataset[i].apply(str_if_not_null)
for i in self.numerical_features:
try:
data[i] = data[i].astype(self.float_dtype)
except:
data[i] = dataset[i].astype(self.float_dtype)
for i in self.time_features:
try:
data[i] = pd.to_datetime(
data[i], infer_datetime_format=True, utc=False, errors="raise"
)
except:
data[i] = pd.to_datetime(
dataset[i], infer_datetime_format=True, utc=False, errors="raise"
)
for i in data.select_dtypes(
include=["datetime64", "datetime64[ns, UTC]"]
).columns:
data[i] = data[i].astype("datetime64[ns]")
        # table of learned types
self.learned_dtypes = data.dtypes
# self.training_columns = data.drop(self.target,axis=1).columns
# if there are inf or -inf then replace them with NaN
data = data.replace([np.inf, -np.inf], np.NaN).astype(self.learned_dtypes)
# lets remove duplicates
# remove duplicate columns (columns with same values)
# (too expensive on bigger data sets)
# data_c = data.T.drop_duplicates()
# data = data_c.T
# remove columns with duplicate name
data = data.loc[:, ~data.columns.duplicated()]
# Remove NAs
data.dropna(axis=0, how="all", inplace=True)
data.dropna(axis=1, how="all", inplace=True)
# remove the row if target column has NA
try:
data.dropna(subset=[self.target], inplace=True)
except KeyError:
pass
# self.training_columns = data.drop(self.target,axis=1).columns
# since due to transpose , all data types have changed, lets change the dtypes to original---- not required any more since not transposing any more
# for i in data.columns: # we are taking all the columns in test , so we dot have to worry about droping target column
# data[i] = data[i].astype(self.learned_dtypes[self.learned_dtypes.index==i])
if self.display_types == True:
display(
wg.Text(
value="Following data types have been inferred automatically, if they are correct press enter to continue or type 'quit' otherwise.",
layout=Layout(width="100%"),
),
display_id="m1",
)
dt_print_out = pd.DataFrame(
self.learned_dtypes, columns=["Feature_Type"]
).drop("UNSUPERVISED_DUMMY_TARGET", errors="ignore")
dt_print_out["Data Type"] = ""
for i in dt_print_out.index:
if i != self.target:
if i in self.id_columns:
dt_print_out.loc[i, "Data Type"] = "ID Column"
elif dt_print_out.loc[i, "Feature_Type"] == "object":
dt_print_out.loc[i, "Data Type"] = "Categorical"
elif dt_print_out.loc[i, "Feature_Type"] == self.float_dtype:
dt_print_out.loc[i, "Data Type"] = "Numeric"
elif dt_print_out.loc[i, "Feature_Type"] == "datetime64[ns]":
dt_print_out.loc[i, "Data Type"] = "Date"
# elif dt_print_out.loc[i,'Feature_Type'] == 'int64':
# dt_print_out.loc[i,'Data Type'] = 'Categorical'
else:
dt_print_out.loc[i, "Data Type"] = "Label"
# if we added the dummy target column , then drop it
dt_print_out.drop(index="dummy_target", errors="ignore", inplace=True)
display(dt_print_out[["Data Type"]])
self.response = input()
if self.response in [
"quit",
"Quit",
"exit",
"EXIT",
"q",
"Q",
"e",
"E",
"QUIT",
"Exit",
]:
sys.exit(
"Read the documentation of setup to learn how to overwrite data types over the inferred types. setup function must run again before you continue modeling."
)
# drop time columns
# data.drop(self.drop_time,axis=1,errors='ignore',inplace=True)
# drop id columns
data.drop(self.id_columns, axis=1, errors="ignore", inplace=True)
return data
def transform(self, dataset, y=None):
"""
Args:
data: accepts a pandas data frame
Returns:
Panda Data Frame
"""
data = dataset.copy()
# also make sure that all the column names are string
data.columns = [str(i) for i in data.columns]
# drop any columns that were asked to drop
data.drop(columns=self.features_todrop, errors="ignore", inplace=True)
data = data[self.final_training_columns]
# also make sure that all the column names are string
data.columns = [str(i) for i in data.columns]
# if there are inf or -inf then replace them with NaN
data.replace([np.inf, -np.inf], np.NaN, inplace=True)
try:
data.dropna(subset=[self.target], inplace=True)
except KeyError:
pass
        # remove special characters from column names
# data.columns= data.columns.str.replace('[,]','')
        # the very first thing we need to do is to check that the training and test data have the same columns
for i in self.final_training_columns:
if i not in data.columns:
raise TypeError(
f"test data does not have column {i} which was used for training."
)
# just keep picking the data and keep applying to the test data set (be mindful of target variable)
        # we take all the columns in test, so we don't have to worry about dropping the target column
        for i in data.columns:
if i == self.target and (
(self.ml_usecase == "classification")
and (self.learned_dtypes[self.target] == "object")
):
data[i] = self.le.transform(data[i].apply(str).astype("object"))
data[i] = data[i].astype("int64")
else:
if self.learned_dtypes[i].name == "datetime64[ns]":
data[i] = pd.to_datetime(
data[i], infer_datetime_format=True, utc=False, errors="coerce"
)
data[i] = data[i].astype(self.learned_dtypes[i])
# drop time columns
# data.drop(self.drop_time,axis=1,errors='ignore',inplace=True)
# drop id columns
data.drop(self.id_columns, axis=1, errors="ignore", inplace=True)
return data
# fit_transform
def fit_transform(self, dataset, y=None):
data = dataset
        # since this is for training, we don't need any transformation since it has already been transformed in fit
data = self.fit(data)
# additionally we just need to treat the target variable
        # for ml use case
if (self.ml_usecase == "classification") & (
data[self.target].dtype == "object"
):
self.le = LabelEncoder()
data[self.target] = self.le.fit_transform(
data[self.target].apply(str).astype("object")
)
self.replacement = _get_labelencoder_reverse_dict(self.le)
# self.u = list(pd.unique(data[self.target]))
# self.replacement = np.arange(0,len(self.u))
# data[self.target]= data[self.target].replace(self.u,self.replacement)
# data[self.target] = data[self.target].astype('int64')
# self.replacement = pd.DataFrame(dict(target_variable=self.u,replaced_with=self.replacement))
# drop time columns
# data.drop(self.drop_time,axis=1,errors='ignore',inplace=True)
# drop id columns
data.drop(self.id_columns, axis=1, errors="ignore", inplace=True)
# finally save a list of columns that we would need from test data set
self.final_training_columns = data.columns.to_list()
self.final_training_columns.remove(self.target)
return data
# _______________________________________________________________________________________________________________________
# Imputation
class Simple_Imputer(_BaseImputer):
"""
    Imputes all types of data (numerical, categorical & time).
    Highly recommended to run Define_dataTypes class first
    Numerical values can be imputed with mean, median, most frequent or filled with zeros
    categorical missing values will be replaced with the most frequent value or "not_available"
    Time values are imputed with the most frequent value
    Ignores target (y) variable
    Args:
        numeric_strategy: string , all possible values {'mean','median','most frequent','zero'}
        categorical_strategy: string , all possible values {'not_available','most frequent'}
        time_strategy: string , all possible values {'mean','median','most frequent'}
        target: string , name of the target variable
        fill_value_numerical: number, value for filling missing values of numeric columns
        fill_value_categorical: string, value for filling missing values of categorical columns
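    Example (sketch, assuming a DataFrame `df` that contains the 'target' column):
        imputer = Simple_Imputer(numeric_strategy='mean',
                                 categorical_strategy='most frequent',
                                 time_strategy='most frequent',
                                 target='target')
        df_imputed = imputer.fit_transform(df)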
"""
_numeric_strategies = {
"mean": "mean",
"median": "median",
"most frequent": "most_frequent",
"zero": "constant",
}
_categorical_strategies = {
"most frequent": "most_frequent",
"not_available": "constant",
}
_time_strategies = {
"mean": "mean",
"median": "median",
"most frequent": "most_frequent",
}
def __init__(
self,
numeric_strategy,
categorical_strategy,
time_strategy,
target,
fill_value_numerical=0,
fill_value_categorical="not_available",
):
# Set the target variable, which we don't want to impute
self.target = target
if numeric_strategy not in self._numeric_strategies:
numeric_strategy = "zero"
self.numeric_strategy = numeric_strategy
if categorical_strategy not in self._categorical_strategies:
categorical_strategy = "most frequent"
self.categorical_strategy = categorical_strategy
if time_strategy not in self._time_strategies:
time_strategy = "most frequent"
self.time_strategy = time_strategy
self.fill_value_numerical = fill_value_numerical
self.fill_value_categorical = fill_value_categorical
# self.most_frequent_time = []
self.numeric_imputer = SimpleImputer(
strategy=self._numeric_strategies[self.numeric_strategy],
fill_value=fill_value_numerical,
)
self.categorical_imputer = SimpleImputer(
strategy=self._categorical_strategies[self.categorical_strategy],
fill_value=fill_value_categorical,
)
self.time_imputer = SimpleImputer(
strategy=self._time_strategies[self.time_strategy],
)
def fit(self, X, y=None):
"""
Fit the imputer on dataset.
Args:
X : pd.DataFrame, the dataset to be imputed
Returns:
self : Simple_Imputer
"""
try:
data = X.drop(self.target, axis=1)
except:
data = X
self.numeric_columns = data.select_dtypes(
include=["float32", "float64", "int32", "int64"]
).columns
self.categorical_columns = data.select_dtypes(
include=["object", "bool", "string", "category"]
).columns
self.time_columns = data.select_dtypes(
include=["datetime64[ns]", "timedelta64[ns]"]
).columns
statistics = []
if not self.numeric_columns.empty:
self.numeric_imputer.fit(data[self.numeric_columns])
statistics.append((self.numeric_imputer.statistics_, self.numeric_columns))
if not self.categorical_columns.empty:
self.categorical_imputer.fit(data[self.categorical_columns])
statistics.append(
(self.categorical_imputer.statistics_, self.categorical_columns)
)
if not self.time_columns.empty:
for col in self.time_columns:
data[col] = data[col][data[col].notnull()].astype(np.int64)
self.time_imputer.fit(data[self.time_columns])
statistics.append((self.time_imputer.statistics_, self.time_columns))
self.statistics_ = np.zeros(shape=len(data.columns), dtype=object)
columns = list(data.columns)
for s, index in statistics:
for i, j in enumerate(index):
self.statistics_[columns.index(j)] = s[i]
return self
def transform(self, X, y=None):
"""
Impute all missing values in dataset.
Args:
X: pd.DataFrame, the dataset to be imputed
Returns:
data: pd.DataFrame, the imputed dataset
"""
data = X
imputed_data = []
if not self.numeric_columns.empty:
numeric_data = pd.DataFrame(
self.numeric_imputer.transform(data[self.numeric_columns]),
columns=self.numeric_columns,
index=data.index,
)
imputed_data.append(numeric_data)
if not self.categorical_columns.empty:
categorical_data = pd.DataFrame(
self.categorical_imputer.transform(data[self.categorical_columns]),
columns=self.categorical_columns,
index=data.index,
)
for col in categorical_data.columns:
categorical_data[col] = categorical_data[col].apply(str)
imputed_data.append(categorical_data)
if not self.time_columns.empty:
datetime_columns = data.select_dtypes(include=["datetime"]).columns
timedelta_columns = data.select_dtypes(include=["timedelta"]).columns
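            # time-like columns are imputed on their int64 (nanoseconds since epoch)
            # representation and converted back to Timestamp / Timedelta below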
timedata_copy = data[self.time_columns].copy()
for col in self.time_columns:
timedata_copy[col] = timedata_copy[col][
timedata_copy[col].notnull()
].astype(np.int64)
time_data = pd.DataFrame(
self.time_imputer.transform(timedata_copy),
columns=self.time_columns,
index=data.index,
)
for col in datetime_columns:
time_data[col][data[col].notnull()] = data[col][data[col].notnull()]
time_data[col] = time_data[col].apply(pd.Timestamp)
for col in timedelta_columns:
time_data[col][data[col].notnull()] = data[col][data[col].notnull()]
time_data[col] = time_data[col].apply(pd.Timedelta)
imputed_data.append(time_data)
if imputed_data:
data.update(pd.concat(imputed_data, axis=1))
data.astype(X.dtypes)
return data
def fit_transform(self, X, y=None):
"""
Fit and impute on dataset.
Args:
X: pd.DataFrame, the dataset to be fitted and imputed
Returns:
pd.DataFrame, the imputed dataset
"""
data = X
self.fit(data)
return self.transform(data)
# _______________________________________________________________________________________________________________________
# Imputation with surrogate columns
class Surrogate_Imputer(_BaseImputer):
"""
    Imputes features and creates surrogate (missing-indicator) columns (numerical, categorical & time).
    - Highly recommended to run Define_dataTypes class first
    - it is also recommended to only apply this to features where it makes business sense to create a surrogate column
    - a surrogate column is only added for features that had missing values in training
    - Numerical values can be imputed with mean or median or filled with zeros
    - categorical missing values will be replaced with the most frequent value or "not_available"
    - Time values are imputed with the most frequent value
    - Ignores target (y) variable
    Args:
        numeric_strategy: string , all possible values {'mean','median','zero'}
        categorical_strategy: string , all possible values {'most frequent','not_available'}
        target: string , name of the target variable
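    Example (sketch, assuming a DataFrame `df` that contains the 'target' column):
        imputer = Surrogate_Imputer(numeric_strategy='mean',
                                    categorical_strategy='most frequent',
                                    target='target')
        imputer.fit(df)
        df_imputed = imputer.transform(df)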
"""
def __init__(self, numeric_strategy, categorical_strategy, target):
self.numeric_strategy = numeric_strategy
self.target = target
self.categorical_strategy = categorical_strategy
def fit(self, dataset, y=None): #
def zeros(x):
return 0
data = dataset
# make a table for numerical variable with strategy stats
if self.numeric_strategy == "mean":
self.numeric_stats = (
data.drop(self.target, axis=1)
.select_dtypes(include=["float32", "float64", "int64"])
.apply(np.nanmean)
)
elif self.numeric_strategy == "median":
self.numeric_stats = (
data.drop(self.target, axis=1)
.select_dtypes(include=["float32", "float64", "int64"])
.apply(np.nanmedian)
)
else:
self.numeric_stats = (
data.drop(self.target, axis=1)
.select_dtypes(include=["float32", "float64", "int64"])
.apply(zeros)
)
self.numeric_columns = (
data.drop(self.target, axis=1)
.select_dtypes(include=["float32", "float64", "int64"])
.columns
)
# also need to learn if any columns had NA in training
self.numeric_na = pd.DataFrame(columns=self.numeric_columns)
for i in self.numeric_columns:
if data[i].isnull().any() == True:
self.numeric_na.loc[0, i] = True
else:
self.numeric_na.loc[0, i] = False
# for categorical columns,
if self.categorical_strategy == "most frequent":
self.categorical_columns = (
data.drop(self.target, axis=1).select_dtypes(include=["object"]).columns
)
self.categorical_stats = pd.DataFrame(
columns=self.categorical_columns
) # place holder
for i in self.categorical_stats.columns:
self.categorical_stats.loc[0, i] = data[i].value_counts().index[0]
# also need to learn if any columns had NA in training, but this is only valid if strategy is "most frequent"
self.categorical_na = pd.DataFrame(columns=self.categorical_columns)
for i in self.categorical_columns:
if sum(data[i].isnull()) > 0:
self.categorical_na.loc[0, i] = True
else:
self.categorical_na.loc[0, i] = False
else:
self.categorical_columns = (
data.drop(self.target, axis=1).select_dtypes(include=["object"]).columns
)
self.categorical_na = pd.DataFrame(columns=self.categorical_columns)
self.categorical_na.loc[
0, :
] = False # (in this situation we are not making any surrogate column)
# for time, there is only one way, pick up the most frequent one
self.time_columns = (
data.drop(self.target, axis=1)
.select_dtypes(include=["datetime64[ns]"])
.columns
)
self.time_stats = pd.DataFrame(columns=self.time_columns) # place holder
self.time_na = pd.DataFrame(columns=self.time_columns)
for i in self.time_columns:
self.time_stats.loc[0, i] = data[i].value_counts().index[0]
# learn if time columns were NA
for i in self.time_columns:
if data[i].isnull().any() == True:
self.time_na.loc[0, i] = True
else:
self.time_na.loc[0, i] = False
return data # nothing to return
def transform(self, dataset, y=None):
data = dataset
# for numeric columns
for i, s in zip(data[self.numeric_columns].columns, self.numeric_stats):
array = data[i].isnull()
data[i].fillna(s, inplace=True)
# make a surrogate column if there was any
if self.numeric_na.loc[0, i] == True:
data[i + "_surrogate"] = array
# make it string
data[i + "_surrogate"] = data[i + "_surrogate"].apply(str)
# for categorical columns
if self.categorical_strategy == "most frequent":
for i in self.categorical_stats.columns:
# data[i].fillna(self.categorical_stats.loc[0,i],inplace=True)
array = data[i].isnull()
data[i] = data[i].fillna(self.categorical_stats.loc[0, i])
data[i] = data[i].apply(str)
# make surrogate column
if self.categorical_na.loc[0, i] == True:
data[i + "_surrogate"] = array
# make it string
data[i + "_surrogate"] = data[i + "_surrogate"].apply(str)
else: # this means replace na with "not_available"
for i in self.categorical_columns:
data[i].fillna("not_available", inplace=True)
data[i] = data[i].apply(str)
# no need to make a surrogate since not_available is itself a new level
# for time
for i in self.time_stats.columns:
array = data[i].isnull()
data[i].fillna(self.time_stats.loc[0, i], inplace=True)
# make surrogate column
if self.time_na.loc[0, i] == True:
data[i + "_surrogate"] = array
# make it string
data[i + "_surrogate"] = data[i + "_surrogate"].apply(str)
return data
def fit_transform(self, dataset, y=None):
data = dataset
data = self.fit(data)
return self.transform(data)
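# Illustrative usage sketch (not called anywhere in the pipeline). The frame,
# column names and strategies below are made-up assumptions, shown only to
# clarify how Surrogate_Imputer behaves: missing cells are filled and, for every
# column that had NAs during fit, a "<col>_surrogate" string flag is appended.
def _example_surrogate_imputer():
    df = pd.DataFrame(
        {
            "age": [25.0, np.nan, 40.0, 31.0],
            "city": ["NY", None, "LA", "NY"],
            "y": [0, 1, 0, 1],
        }
    )
    imputer = Surrogate_Imputer(
        numeric_strategy="mean", categorical_strategy="most frequent", target="y"
    )
    # expected: age NaN -> 32.0 (mean), city None -> "NY" (mode),
    # plus "age_surrogate" / "city_surrogate" columns holding "True"/"False" strings
    return imputer.fit_transform(df)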
class Iterative_Imputer(_BaseImputer):
def __init__(
self,
regressor: BaseEstimator,
classifier: BaseEstimator,
*,
target=None,
missing_values=np.nan,
initial_strategy_numeric: str = "mean",
initial_strategy_categorical: str = "most frequent",
initial_strategy_time: str = "most frequent",
ordinal_columns: Optional[list] = None,
max_iter: int = 10,
warm_start: bool = False,
imputation_order: str = "ascending",
verbose: int = 0,
random_state: int = None,
add_indicator: bool = False,
):
super().__init__(missing_values=missing_values, add_indicator=add_indicator)
self.regressor = regressor
self.classifier = classifier
self.initial_strategy_numeric = initial_strategy_numeric
self.initial_strategy_categorical = initial_strategy_categorical
self.initial_strategy_time = initial_strategy_time
self.max_iter = max_iter
self.warm_start = warm_start
self.imputation_order = imputation_order
self.verbose = verbose
self.random_state = random_state
self.target = target
if ordinal_columns is None:
ordinal_columns = []
self.ordinal_columns = list(ordinal_columns)
self._column_cleaner = Clean_Colum_Names()
def _initial_imputation(self, X):
if self.initial_imputer_ is None:
self.initial_imputer_ = Simple_Imputer(
target="__TARGET__", # dummy value, we don't actually want to drop anything
numeric_strategy=self.initial_strategy_numeric,
categorical_strategy=self.initial_strategy_categorical,
time_strategy=self.initial_strategy_time,
)
X_filled = self.initial_imputer_.fit_transform(X)
else:
X_filled = self.initial_imputer_.transform(X)
return X_filled
def _impute_one_feature(self, X, column, X_na_mask, fit):
if not fit:
check_is_fitted(self)
is_classification = (
X[column].dtype.name == "object" or column in self.ordinal_columns
)
if is_classification:
if column in self.classifiers_:
time, dummy, le, estimator = self.classifiers_[column]
elif not fit:
return X
else:
estimator = clone(self._classifier)
time = Make_Time_Features()
dummy = Dummify(column)
le = LabelEncoder()
else:
if column in self.regressors_:
time, dummy, le, estimator = self.regressors_[column]
elif not fit:
return X
else:
estimator = clone(self._regressor)
time = Make_Time_Features()
dummy = Dummify(column)
le = None
if fit:
fit_kwargs = {}
X_train = X[~X_na_mask[column]]
y_train = X_train[column]
# catboost handles categoricals itself
if "catboost" not in str(type(estimator)).lower():
X_train = time.fit_transform(X_train)
X_train = dummy.fit_transform(X_train)
X_train.drop(column, axis=1, inplace=True)
else:
X_train.drop(column, axis=1, inplace=True)
fit_kwargs["cat_features"] = []
for i, col in enumerate(X_train.columns):
if X_train[col].dtype.name == "object":
X_train[col] = pd.Categorical(
X_train[col], ordered=column in self.ordinal_columns
)
fit_kwargs["cat_features"].append(i)
fit_kwargs["cat_features"] = np.array(
fit_kwargs["cat_features"], dtype=int
)
X_train = self._column_cleaner.fit_transform(X_train)
if le:
y_train = le.fit_transform(y_train)
try:
assert self.warm_start
estimator.partial_fit(X_train, y_train)
except:
estimator.fit(X_train, y_train, **fit_kwargs)
X_test = X.drop(column, axis=1)[X_na_mask[column]]
X_test = time.transform(X_test)
# catboost handles categoricals itself
if "catboost" not in str(type(estimator)).lower():
X_test = dummy.transform(X_test)
else:
for col in X_test.select_dtypes("object").columns:
X_test[col] = pd.Categorical(
X_test[col], ordered=column in self.ordinal_columns
)
result = estimator.predict(X_test)
if le:
result = le.inverse_transform(result)
if fit:
if is_classification:
self.classifiers_[column] = (time, dummy, le, estimator)
else:
self.regressors_[column] = (time, dummy, le, estimator)
if result.dtype.name == "float64":
result = result.astype("float32")
X_test[column] = result
X.update(X_test[column])
gc.collect()
return X
def _impute(self, X, fit: bool):
if self.target in X.columns:
target_column = X[self.target]
X = X.drop(self.target, axis=1)
else:
target_column = None
original_columns = X.columns
original_index = X.index
X = X.reset_index(drop=True)
X = self._column_cleaner.fit_transform(X)
self.imputation_sequence_ = (
X.isnull().sum().sort_values(ascending=self.imputation_order == "ascending")
)
self.imputation_sequence_ = [
col
for col in self.imputation_sequence_[self.imputation_sequence_ > 0].index
if X[col].dtype.name != "datetime64[ns]"
]
X_na_mask = X.isnull()
X_imputed = self._initial_imputation(X.copy())
for i in range(self.max_iter if fit else 1):
for feature in self.imputation_sequence_:
get_logger().info(f"Iterative Imputation: {i+1} cycle | {feature}")
X_imputed = self._impute_one_feature(X_imputed, feature, X_na_mask, fit)
X_imputed.columns = original_columns
X_imputed.index = original_index
if target_column is not None:
X_imputed[self.target] = target_column
return X_imputed
def transform(self, X, y=None, **fit_params):
return self._impute(X, fit=False)
def fit_transform(self, X, y=None, **fit_params):
self.random_state_ = getattr(
self, "random_state_", check_random_state(self.random_state)
)
if self.regressor is None:
raise ValueError("No regressor provided")
else:
self._regressor = clone(self.regressor)
try:
self._regressor.set_params(random_state=self.random_state_)
except:
pass
if self.classifier is None:
raise ValueError("No classifier provided")
else:
self._classifier = clone(self.classifier)
try:
self._classifier.set_params(random_state=self.random_state_)
except:
pass
self.classifiers_ = {}
self.regressors_ = {}
self.initial_imputer_ = None
return self._impute(X, fit=True)
def fit(self, X, y=None, **fit_params):
self.fit_transform(X, y=y, **fit_params)
return self
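# Minimal sketch of how Iterative_Imputer is expected to be wired up; the
# estimators, column names and values are assumptions for illustration only,
# and the call assumes the rest of this preprocessing module (Simple_Imputer,
# Make_Time_Features, Dummify, get_logger, ...) is importable. Each column with
# missing values is modelled from the remaining columns with the given
# regressor (numeric) or classifier (categorical / ordinal).
def _example_iterative_imputer():
    from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor

    df = pd.DataFrame(
        {
            "income": [50.0, np.nan, 70.0, 65.0, np.nan],
            "segment": ["a", "b", None, "a", "b"],
            "y": [1, 0, 1, 0, 1],
        }
    )
    imputer = Iterative_Imputer(
        regressor=RandomForestRegressor(n_estimators=10),
        classifier=RandomForestClassifier(n_estimators=10),
        target="y",
        max_iter=2,
    )
    return imputer.fit_transform(df)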
# _______________________________________________________________________________________________________________________
# Zero and Near Zero Variance
class Zroe_NearZero_Variance(BaseEstimator, TransformerMixin):
"""
- it eliminates the features having zero variance
- it eliminates the features haveing near zero variance
- Near zero variance is determined by
-1) Count of unique points divided by the total length of the feature has to be lower than a pre sepcified threshold
-2) Most common point(count) divided by the second most common point(count) in the feature is greater than a pre specified threshold
Once both conditions are met , the feature is dropped
-Ignores target variable
Args:
threshold_1: float (between 0.0 to 1.0) , default is .10
threshold_2: int (between 1 to 100), default is 20
tatget variable : string, name of the target variable
"""
def __init__(self, target, threshold_1=0.1, threshold_2=20):
self.threshold_1 = threshold_1
self.threshold_2 = threshold_2
self.target = target
def fit(
self, dataset, y=None
): # from training data set we are going to learn what columns to drop
data = dataset
self.to_drop = []
sampl_len = len(data[self.target])
for i in data.drop(self.target, axis=1).columns:
# get the number of unique counts
u = pd.DataFrame(data[i].value_counts()).sort_values(
by=i, ascending=False, inplace=False
)
# take len of u and divide it by the total sample number; this checks the 1st rule (unique ratio has to be low, e.g. 10%)
first = len(u) / sampl_len
# then check if the most common divided by the 2nd most common ratio is 20 or more
if (
len(u[i]) == 1
): # if the column has no variance at all, make the ratio large so it gets dropped
second = 100
else:
second = u.iloc[0, 0] / u.iloc[1, 0]
# if both conditions are true then drop the column; however, we don't want to alter columns that indicate NA's
if (
(first <= self.threshold_1)
and (second >= self.threshold_2)
and (i[-10:] != "_surrogate")
):
self.to_drop.append(i)
# also drop if the column has zero variance
if (second == 100) and (i[-10:] != "_surrogate"):
self.to_drop.append(i)
def transform(
self, dataset, y=None
): # since it is only for training data set , nothing here
data = dataset.drop(self.to_drop, axis=1)
return data
def fit_transform(self, dataset, y=None):
data = dataset
self.fit(data)
return self.transform(data)
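# Small sketch of the near-zero-variance rule above, with made-up columns.
# "constant" has a single unique value, so its most-common / second-most-common
# ratio is treated as 100 and it is dropped; "x" has 100 unique values out of
# 100 rows (unique ratio 1.0 > threshold_1), so it is kept.
def _example_near_zero_variance():
    df = pd.DataFrame(
        {"constant": [1] * 100, "x": np.arange(100), "y": np.random.rand(100)}
    )
    nzv = Zroe_NearZero_Variance(target="y", threshold_1=0.1, threshold_2=20)
    return nzv.fit_transform(df)  # "constant" is expected to be removed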
# ____________________________________________________________________________________________________________________________
# rare catagorical variables
class Catagorical_variables_With_Rare_levels(BaseEstimator, TransformerMixin):
"""
-Merges levels in catagorical features with more frequent level if they appear less than a threshold count
e.g. Col=[a,a,a,a,b,b,c,c]
if threshold is set to 2 , then c will be mrged with b because both are below threshold
There has to be atleast two levels belwo threshold for this to work
the process will keep going until all the levels have atleast 2(threshold) counts
-Only handles catagorical features
-It is recommended to run the Zroe_NearZero_Variance and Define_dataTypes first
-Ignores target variable
Args:
threshold: int , default 10
target: string , name of the target variable
new_level_name: string , name given to the new level generated, default 'others'
"""
def __init__(self, target, new_level_name="others_infrequent", threshold=0.05):
self.threshold = threshold
self.target = target
self.new_level_name = new_level_name
def fit(
self, dataset, y=None
): # learn, for each column, which levels to merge into the "others" level
# every level of the categorical feature has to appear more often than the threshold count; otherwise it is clubbed together as "others"
# in order to apply, there should be at least two levels below the threshold
# create a place holder
data = dataset
self.ph = pd.DataFrame(
columns=data.drop(self.target, axis=1)
.select_dtypes(include="object")
.columns
)
# ph.columns = df.columns# catagorical only
for i in data[self.ph.columns].columns:
# determine the infrequebt count
v_c = data[i].value_counts()
count_th = round(v_c.quantile(self.threshold))
a = np.sum(
pd.DataFrame(data[i].value_counts().sort_values())[i] <= count_th
)
if a >= 2: # rare levels has to be atleast two
count = pd.DataFrame(data[i].value_counts().sort_values())
count.columns = ["fre"]
count = count[count["fre"] <= count_th]
to_club = list(count.index)
self.ph.loc[0, i] = to_club
else:
self.ph.loc[0, i] = []
# # also need to make a place holder that keep records of all the levels , and in case a new level appears in test we will change it to others
# self.ph_level = pd.DataFrame(columns=data.drop(self.target,axis=1).select_dtypes(include="object").columns)
# for i in self.ph_level.columns:
# self.ph_level.loc[0,i] = list(data[i].value_counts().sort_values().index)
def transform(self, dataset, y=None): #
# transform
data = dataset
for i in data[self.ph.columns].columns:
t_replace = self.ph.loc[0, i]
data[i].replace(
to_replace=t_replace, value=self.new_level_name, inplace=True
)
return data
def fit_transform(self, dataset, y=None):
data = dataset
self.fit(data)
return self.transform(data)
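# Illustrative sketch with an assumed column: levels whose count falls at or
# below the chosen quantile of the level-count distribution are clubbed into
# new_level_name, provided at least two such rare levels exist.
def _example_rare_levels():
    df = pd.DataFrame(
        {
            "colour": ["red"] * 10 + ["blue"] * 8 + ["teal", "lime"],
            "y": np.arange(20),
        }
    )
    merger = Catagorical_variables_With_Rare_levels(target="y", threshold=0.5)
    # "teal" and "lime" (1 occurrence each) are expected to become "others_infrequent"
    return merger.fit_transform(df)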
# _______________________________________________________________________________________________________________________
# new catagorical level in test
class New_Catagorical_Levels_in_TestData(BaseEstimator, TransformerMixin):
"""
-This treats if a new level appears in the test dataset catagorical's feature (i.e a level on whihc model was not trained previously)
-It simply replaces the new level in test data set with the most frequent or least frequent level in the same feature in the training data set
-It is recommended to run the Zroe_NearZero_Variance and Define_dataTypes first
-Ignores target variable
Args:
target: string , name of the target variable
replacement_strategy:string , 'raise exception', 'least frequent' or 'most frequent' (default 'most frequent' )
"""
def __init__(self, target, replacement_strategy="most frequent"):
self.target = target
self.replacement_strategy = replacement_strategy
def fit(self, data, y=None):
# need to make a place holder that keep records of all the levels , and in case a new level appears in test we will change it to others
self.ph_train_level = pd.DataFrame(
columns=data.drop(self.target, axis=1)
.select_dtypes(include="object")
.columns
)
for i in self.ph_train_level.columns:
if self.replacement_strategy == "least frequent":
self.ph_train_level.loc[0, i] = list(
data[i].value_counts().sort_values().index
)
else:
self.ph_train_level.loc[0, i] = list(data[i].value_counts().index)
def transform(self, data, y=None): #
# transform
# we need to learn the same for test data , and then we will compare to check what levels are new in there
self.ph_test_level = pd.DataFrame(
columns=data.drop(self.target, axis=1, errors="ignore")
.select_dtypes(include="object")
.columns
)
for i in self.ph_test_level.columns:
self.ph_test_level.loc[0, i] = list(
data[i].value_counts().sort_values().index
)
# now we have levels for both test and train; we will start comparing and replacing levels in the test set (only if the test set has new levels)
for i in self.ph_test_level.columns:
new = list(
(set(self.ph_test_level.loc[0, i]) - set(self.ph_train_level.loc[0, i]))
)
# now if there is a difference , only then replace it
if len(new) > 0:
if self.replacement_strategy == "raise exception":
raise ValueError(
f"Column '{i}' contains levels '{new}' which were not present in train data."
)
data[i].replace(new, self.ph_train_level.loc[0, i][0], inplace=True)
return data
def fit_transform(
self, data, y=None
): # There is no transformation happening in training data set, its all about test
self.fit(data)
return data
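# Sketch of the behaviour on unseen levels; "city" and the frames are assumed
# for illustration. Levels present in test but not in train are replaced with
# the most frequent training level (or the least frequent one, or an exception
# is raised, depending on replacement_strategy).
def _example_new_test_levels():
    train = pd.DataFrame({"city": ["NY", "NY", "LA"], "y": [0, 1, 0]})
    test = pd.DataFrame({"city": ["NY", "SF"], "y": [1, 0]})
    handler = New_Catagorical_Levels_in_TestData(target="y")
    handler.fit(train)
    return handler.transform(test)  # "SF" is expected to become "NY"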
# _______________________________________________________________________________________________________________________
# Group akin features
class Group_Similar_Features(BaseEstimator, TransformerMixin):
"""
- Given a list of features , it creates aggregate features
- features created are Min, Max, Mean, Median, Mode & Std
- Only works on numerical features
Args:
list_of_similar_features: list of list, string , e.g. [['col',col2],['col3','col4']]
group_name: list, group name/names to be added as prefix to aggregate features, e.g ['gorup1','group2']
"""
def __init__(self, group_name=[], list_of_grouped_features=[[]]):
self.list_of_similar_features = list_of_grouped_features
self.group_name = group_name
# raise an error if a list of lists was not given
try:
np.array(self.list_of_similar_features).shape[0]
except:
raise TypeError(
"Group_Similar_Features: list_of_grouped_features is not provided as a list of lists"
)
def fit(self, data, y=None):
# nothing to learn
return self
def transform(self, dataset, y=None):
data = dataset
# # only going to process if there is an actual missing value in training data set
if len(self.list_of_similar_features) > 0:
for f, g in zip(self.list_of_similar_features, self.group_name):
data[g + "_Min"] = data[f].apply(np.min, 1)
data[g + "_Max"] = data[f].apply(np.max, 1)
data[g + "_Mean"] = data[f].apply(np.mean, 1)
data[g + "_Median"] = data[f].apply(np.median, 1)
data[g + "_Mode"] = stats.mode(data[f], 1)[0]
data[g + "_Std"] = data[f].apply(np.std, 1)
return data
else:
return data
def fit_transform(self, data, y=None):
return self.transform(data)
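# Sketch with assumed column names: for each group, row-wise Min/Max/Mean/
# Median/Mode/Std aggregates are appended with the group name as a prefix.
def _example_group_similar_features():
    df = pd.DataFrame(
        {"q1": [1.0, 2.0, 3.0], "q2": [2.0, 2.0, 5.0], "other": [7.0, 8.0, 9.0]}
    )
    grouper = Group_Similar_Features(
        group_name=["survey"], list_of_grouped_features=[["q1", "q2"]]
    )
    return grouper.fit_transform(df)  # adds survey_Min, survey_Max, ..., survey_Std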
# ____________________________________________________________________________________________________________________________________________________________________
# Binning for Continious
class Binning(BaseEstimator, TransformerMixin):
"""
- Converts numerical variables to catagorical variable through binning
- Number of binns are automitically determined through Sturges method
- Once discretize, original feature will be dropped
Args:
features_to_discretize: list of featur names to be binned
"""
def __init__(self, features_to_discretize):
self.features_to_discretize = features_to_discretize
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, dataset, y=None):
data = dataset
# only do if features are provided
if len(self.features_to_discretize) > 0:
data_t = self.disc.transform(
np.array(data[self.features_to_discretize]).reshape(
-1, self.len_columns
)
)
# make pandas data frame
data_t = pd.DataFrame(
data_t, columns=self.features_to_discretize, index=data.index
)
# all these columns are catagorical
data_t = data_t.astype(str)
# drop original columns
data.drop(self.features_to_discretize, axis=1, inplace=True)
# add newly created columns
data = pd.concat((data, data_t), axis=1)
return data
def fit_transform(self, dataset, y=None):
data = dataset
# only do if features are given
if len(self.features_to_discretize) > 0:
# place holder for all the features for their binns
self.binns = []
for i in self.features_to_discretize:
# get the number of bins
hist, _ = np.histogram(data[i], bins="sturges")
self.binns.append(len(hist))
# how many colums to deal with
self.len_columns = len(self.features_to_discretize)
# now do fit transform
self.disc = KBinsDiscretizer(
n_bins=self.binns, encode="ordinal", strategy="kmeans"
)
data_t = self.disc.fit_transform(
np.array(data[self.features_to_discretize]).reshape(
-1, self.len_columns
)
)
# make pandas data frame
data_t = pd.DataFrame(
data_t, columns=self.features_to_discretize, index=data.index
)
# all these columns are catagorical
data_t = data_t.astype(str)
# drop original columns
data.drop(self.features_to_discretize, axis=1, inplace=True)
# add newly created columns
data = pd.concat((data, data_t), axis=1)
return data
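# Sketch of the binning step with an assumed column. The Sturges rule gives
# ceil(log2(n)) + 1 bins (e.g. 8 bins for 100 samples) and KBinsDiscretizer
# with the k-means strategy assigns each value an ordinal bin label that is
# then stored as a string.
def _example_binning():
    df = pd.DataFrame({"amount": np.random.rand(100) * 1000.0, "y": np.arange(100)})
    binner = Binning(features_to_discretize=["amount"])
    return binner.fit_transform(df)  # "amount" is replaced by string bin labels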
# ______________________________________________________________________________________________________________________
# Scaling & Power Transform
class Scaling_and_Power_transformation(BaseEstimator, TransformerMixin):
"""
-Given a data set, applies Min Max, Standar Scaler or Power Transformation (yeo-johnson)
-it is recommended to run Define_dataTypes first
- ignores target variable
Args:
target: string , name of the target variable
function_to_apply: string , default 'zscore' (standard scaler), all other {'minmaxm','yj','quantile','robust','maxabs'} ( min max,yeo-johnson & quantile power transformation, robust and MaxAbs scaler )
"""
def __init__(self, target, function_to_apply="zscore", random_state_quantile=42):
self.target = target
self.function_to_apply = function_to_apply
self.random_state_quantile = random_state_quantile
# self.transform_target = transform_target
# self.ml_usecase = ml_usecase
def fit(self, dataset, y=None):
data = dataset
# we only want to apply if there are numeric columns
self.numeric_features = (
data.drop(self.target, axis=1, errors="ignore")
.select_dtypes(include=["float32", "float64", "int64"])
.columns
)
if len(self.numeric_features) > 0:
if self.function_to_apply == "zscore":
self.scale_and_power = StandardScaler()
self.scale_and_power.fit(data[self.numeric_features])
elif self.function_to_apply == "minmax":
self.scale_and_power = MinMaxScaler()
self.scale_and_power.fit(data[self.numeric_features])
elif self.function_to_apply == "yj":
self.scale_and_power = PowerTransformer(
method="yeo-johnson", standardize=True
)
self.scale_and_power.fit(data[self.numeric_features])
elif self.function_to_apply == "quantile":
self.scale_and_power = QuantileTransformer(
random_state=self.random_state_quantile,
output_distribution="normal",
)
self.scale_and_power.fit(data[self.numeric_features])
elif self.function_to_apply == "robust":
self.scale_and_power = RobustScaler()
self.scale_and_power.fit(data[self.numeric_features])
elif self.function_to_apply == "maxabs":
self.scale_and_power = MaxAbsScaler()
self.scale_and_power.fit(data[self.numeric_features])
return self
def transform(self, dataset, y=None):
data = dataset
if len(self.numeric_features) > 0:
self.data_t = pd.DataFrame(
self.scale_and_power.transform(data[self.numeric_features])
)
# we need to set the same index as original data
self.data_t.index = data.index
self.data_t.columns = self.numeric_features
for i in self.numeric_features:
data[i] = self.data_t[i]
return data
else:
return data
def fit_transform(self, dataset, y=None):
data = dataset
self.fit(data)
# convert target if appropriate
# default behavious is quantile transformer
# if ((self.ml_usecase == 'regression') and (self.transform_target == True)):
# self.scale_and_power_target = QuantileTransformer(random_state=self.random_state_quantile,output_distribution='normal')
# data[self.target]=self.scale_and_power_target.fit_transform(np.array(data[self.target]).reshape(-1,1))
return self.transform(data)
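# Sketch of the scaling step with assumed data; only the numeric (non-target)
# columns are transformed in place, here with the default z-score scaler.
def _example_scaling():
    df = pd.DataFrame(
        {"x1": [1.0, 2.0, 3.0], "x2": [10.0, 20.0, 30.0], "y": [0, 1, 0]}
    )
    scaler = Scaling_and_Power_transformation(target="y", function_to_apply="zscore")
    return scaler.fit_transform(df)  # x1 and x2 become zero-mean, unit-variance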
# ______________________________________________________________________________________________________________________
# Scaling & Power Transform
class Target_Transformation(BaseEstimator, TransformerMixin):
"""
- Applies Power Transformation (yeo-johnson , Box-Cox) to target variable (Applicable to Regression only)
- 'bc' for Box_Coc & 'yj' for yeo-johnson, default is Box-Cox
- if target containes negtive / zero values , yeo-johnson is automatically selected
"""
def __init__(self, target, function_to_apply="bc"):
self.target = target
if function_to_apply == "bc":
function_to_apply = "box-cox"
else:
function_to_apply = "yeo-johnson"
self.function_to_apply = function_to_apply
def inverse_transform(self, dataset, y=None):
data = self.p_transform_target.inverse_transform(
np.array(dataset).reshape(-1, 1)
)
return data
def fit(self, dataset, y=None):
self.fit_transform(dataset, y=y)
return self
def transform(self, dataset, y=None):
data = dataset
if self.target in dataset.columns:
# apply transformation
data[self.target] = self.p_transform_target.transform(
np.array(data[self.target]).reshape(-1, 1)
)
return data
def fit_transform(self, dataset, y=None):
data = dataset
# if target has zero or negative values use yj instead
if any(data[self.target] <= 0):
self.function_to_apply = "yeo-johnson"
# apply transformation
self.p_transform_target = PowerTransformer(method=self.function_to_apply)
data[self.target] = self.p_transform_target.fit_transform(
np.array(data[self.target]).reshape(-1, 1)
)
return data
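# Sketch with an assumed regression target: Box-Cox is requested, but because
# the target contains a zero the transformer falls back to yeo-johnson, as
# implemented in fit_transform above. inverse_transform maps predictions back
# to the original target scale.
def _example_target_transformation():
    df = pd.DataFrame({"x": [1.0, 2.0, 3.0, 4.0], "price": [0.0, 10.0, 20.0, 40.0]})
    tt = Target_Transformation(target="price", function_to_apply="bc")
    transformed = tt.fit_transform(df)
    return tt.inverse_transform(transformed["price"])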
# __________________________________________________________________________________________________________________________
# Time feature extractor
class Make_Time_Features(BaseEstimator, TransformerMixin):
"""
-Given a time feature , it extracts more features
- Only accepts / works where feature / data type is datetime64[ns]
- full list of features is:
['month','weekday',is_month_end','is_month_start','hour']
- all extracted features are defined as string / object
-it is recommended to run Define_dataTypes first
Args:
time_feature: list of feature names as datetime64[ns] , default empty/none , if empty/None , it will try to pickup dates automatically where data type is datetime64[ns]
list_of_features: list of required features , default value ['month','weekday','is_month_end','is_month_start','hour']
"""
def __init__(
self,
time_feature=None,
list_of_features=["month", "weekday", "is_month_end", "is_month_start", "hour"],
):
self.time_feature = time_feature
self.list_of_features = set(list_of_features)
def fit(self, data, y=None):
if self.time_feature is None:
self.time_feature = data.select_dtypes(include=["datetime64[ns]"]).columns
self.has_hour_ = set()
for i in self.time_feature:
if "hour" in self.list_of_features:
if any(x.hour for x in data[i]):
self.has_hour_.add(i)
return self
def transform(self, dataset, y=None):
data = dataset.copy()
# run fit transform first
def get_time_features(r):
features = []
if "month" in self.list_of_features:
features.append(("_month", str(r.month)))
if "weekday" in self.list_of_features:
features.append(("_weekday", str(r.weekday())))
if "is_month_end" in self.list_of_features:
features.append(
(
"_is_month_end",
"1"
if calendar.monthrange(r.year, r.month)[1] == r.day
else "0",
)
)
if "is_month_start" in self.list_of_features:
features.append(("_is_month_start", "1" if r.day == 1 else "0"))
return tuple(features)
# start making features for every column in the time list
for i in self.time_feature:
list_of_features = [get_time_features(r) for r in data[i]]
fd = defaultdict(list)
for x in list_of_features:
for k, v in x:
fd[k].append(v)
for k, v in fd.items():
data[i + k] = v
# make hour column if chosen
if "hour" in self.list_of_features and i in self.has_hour_:
h = [r.hour for r in data[i]]
data[f"{i}_hour"] = h
data[f"{i}_hour"] = data[f"{i}_hour"].apply(str)
# we dont need time columns any more
data.drop(self.time_feature, axis=1, inplace=True)
return data
def fit_transform(self, dataset, y=None):
# if no columns names are given , then pick datetime columns
self.fit(dataset, y=y)
return self.transform(dataset, y=y)
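# Sketch with an assumed datetime column: month / weekday / is_month_end /
# is_month_start (and hour, when any timestamp carries one) are extracted as
# string columns and the original datetime column is dropped.
def _example_time_features():
    df = pd.DataFrame(
        {"signup": pd.to_datetime(["2020-01-01 10:00", "2020-02-29", "2020-03-15"])}
    )
    tf = Make_Time_Features()
    return tf.fit_transform(df)  # signup_month, signup_weekday, ... columns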
# ____________________________________________________________________________________________________________________________________________________________________
# Ordinal transformer
class Ordinal(BaseEstimator, TransformerMixin):
"""
- converts categorical features into ordinal values
- takes a dataframe , and information about column names and ordered categories as dict
- returns float panda data frame
"""
def __init__(self, info_as_dict):
self.info_as_dict = info_as_dict
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, dataset, y=None):
data = dataset
new_data_test = pd.DataFrame(
self.enc.transform(data[self.info_as_dict.keys()]),
columns=self.info_as_dict.keys(),
index=data.index,
)
for i in self.info_as_dict.keys():
data[i] = new_data_test[i]
return data
def fit_transform(self, dataset, y=None):
data = dataset
# create categories from given keys in the data set
cat_list = []
for i in self.info_as_dict.values():
i = [np.array(i)]
cat_list = cat_list + i
# now do fit transform
self.enc = OrdinalEncoder(categories=cat_list)
new_data_train = pd.DataFrame(
self.enc.fit_transform(data.loc[:, self.info_as_dict.keys()]),
columns=self.info_as_dict,
index=data.index,
)
# new_data = pd.DataFrame(self.enc.fit_transform(data.loc[:,self.info_as_dict.keys()]))
for i in self.info_as_dict.keys():
data[i] = new_data_train[i]
return data
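# Sketch with an assumed ordered categorical: the dict maps a column name to
# its ordered categories, which OrdinalEncoder turns into 0.0, 1.0, 2.0, ...
def _example_ordinal():
    df = pd.DataFrame({"size": ["small", "large", "medium", "small"]})
    enc = Ordinal(info_as_dict={"size": ["small", "medium", "large"]})
    return enc.fit_transform(df)  # small -> 0.0, medium -> 1.0, large -> 2.0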
# _______________________________________________________________________________________________________________________
# make dummy variables
class Dummify(BaseEstimator, TransformerMixin):
"""
- makes one hot encoded variables for dummy variable
- it is HIGHLY recommended to run the Select_Data_Type class first
- Ignores target variable
Args:
target: string , name of the target variable
"""
def __init__(self, target):
self.target = target
# create ohe object
self.ohe = OneHotEncoder(handle_unknown="ignore", dtype=np.float32)
def fit(self, X, y=None):
data = X
# will only do this if there are categorical variables
if len(data.select_dtypes(include=("object")).columns) > 0:
# we need to learn the column names once the training data set is dummify
# save non categorical data
self.data_nonc = data.drop(
self.target, axis=1, errors="ignore"
).select_dtypes(exclude=("object"))
if self.target in data.columns:
self.target_column = data[[self.target]]
else:
self.target_column = None
# # plus we will only take object data types
categorical_data = data.drop(
self.target, axis=1, errors="ignore"
).select_dtypes(include=("object"))
# # now fit the training column
self.ohe.fit(categorical_data)
self.data_columns = self.ohe.get_feature_names(categorical_data.columns)
return self
def transform(self, X, y=None):
data = X.copy()
# will only do this if there are categorical variables
if len(data.select_dtypes(include=("object")).columns) > 0:
# only for test data
self.data_nonc = data.drop(
self.target, axis=1, errors="ignore"
).select_dtypes(exclude=("object"))
# fit without target and only categorical columns
array = self.ohe.transform(
data.drop(self.target, axis=1, errors="ignore").select_dtypes(
include=("object")
)
).toarray()
data_dummies = pd.DataFrame(array, columns=self.data_columns)
data_dummies.index = self.data_nonc.index
if self.target in data.columns:
target_column = data[[self.target]]
else:
target_column = None
# now put target , numerical and categorical variables back togather
data = pd.concat((target_column, self.data_nonc, data_dummies), axis=1)
del self.data_nonc
return data
else:
return data
def fit_transform(self, dataset, y=None):
data = dataset.copy()
# will only do this if there are categorical variables
if len(data.select_dtypes(include=("object")).columns) > 0:
self.fit(data)
# fit without target and only categorical columns
array = self.ohe.transform(
data.drop(self.target, axis=1, errors="ignore").select_dtypes(
include=("object")
)
).toarray()
data_dummies = pd.DataFrame(array, columns=self.data_columns)
data_dummies.index = self.data_nonc.index
# now put target , numerical and categorical variables back togather
data = pd.concat((self.target_column, self.data_nonc, data_dummies), axis=1)
# remove unwanted attributes
del (self.target_column, self.data_nonc)
return data
else:
return data
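# Sketch with assumed columns: object columns (except the target) are one-hot
# encoded with handle_unknown="ignore", so unseen test levels simply produce
# all-zero dummy columns.
def _example_dummify():
    train = pd.DataFrame(
        {"colour": ["red", "blue", "red"], "x": [1.0, 2.0, 3.0], "y": [0, 1, 0]}
    )
    test = pd.DataFrame({"colour": ["blue", "green"], "x": [4.0, 5.0], "y": [1, 0]})
    dummy = Dummify(target="y")
    train_t = dummy.fit_transform(train)
    test_t = dummy.transform(test)  # "green" row gets zeros in the colour_* dummies
    return train_t, test_t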
# _______________________________________________________________________________________________________________________
# Outlier
class Outlier(BaseEstimator, TransformerMixin):
"""
- Removes outlier using ABOD,KNN,IFO,PCA & HOBS using hard voting
- Only takes numerical / One Hot Encoded features
"""
def __init__(
self, target, contamination=0.20, random_state=42, methods=["knn", "iso", "pca"]
):
self.target = target
self.contamination = contamination
self.random_state = random_state
self.methods = methods
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, data, y=None):
return data
def fit_transform(self, dataset, y=None):
# dummify if there are any objects
if len(dataset.select_dtypes(include="object").columns) > 0:
self.dummy = Dummify(self.target)
data = self.dummy.fit_transform(dataset)
else:
data = dataset
data_without_target = data.drop(self.target, axis=1)
if "knn" in self.methods:
self.knn = KNN(contamination=self.contamination)
self.knn.fit(data_without_target)
knn_predict = self.knn.predict(data_without_target)
data_without_target["knn"] = knn_predict
if "iso" in self.methods:
self.iso = IForest(
contamination=self.contamination,
random_state=self.random_state,
behaviour="new",
)
self.iso.fit(data_without_target)
iso_predict = self.iso.predict(data_without_target)
data_without_target["iso"] = iso_predict
if "pca" in self.methods:
self.pca = PCA_od(
contamination=self.contamination, random_state=self.random_state
)
self.pca.fit(data_without_target)
pca_predict = self.pca.predict(data_without_target)
data_without_target["pca"] = pca_predict
data_without_target["vote_outlier"] = data_without_target[self.methods].sum(
axis=1
)
self.outliers = data_without_target[
data_without_target["vote_outlier"] == len(self.methods)
].index
return dataset[~dataset.index.isin(self.outliers)]
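# Sketch of the hard-voting rule with assumed data: a row is removed only when
# every requested detector (KNN, Isolation Forest, PCA from pyod) flags it as
# an outlier, i.e. its per-row vote equals len(methods).
def _example_outlier_removal():
    rng = np.random.RandomState(0)
    df = pd.DataFrame(
        {
            "x1": rng.normal(size=100),
            "x2": rng.normal(size=100),
            "y": rng.randint(0, 2, 100),
        }
    )
    df.loc[0, ["x1", "x2"]] = [25.0, 25.0]  # an obvious outlier
    out = Outlier(target="y", contamination=0.05)
    return out.fit_transform(df)  # row 0 is expected to be dropped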
# ____________________________________________________________________________________________________________________________________________________________________
# Column Name cleaner transformer
class Clean_Colum_Names(BaseEstimator, TransformerMixin):
"""
- Cleans special chars that are not supported by jason format
"""
def fit(self, data, y=None):
return self
def transform(self, dataset, y=None):
data = dataset
data.columns = data.columns.str.replace(r"[\,\}\{\]\[\:\"\']", "", regex=True)
return data
def fit_transform(self, dataset, y=None):
return self.transform(dataset, y=y)
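# Quick sketch: characters such as [ ] { } , : " ' are stripped from column
# names so that downstream serialization to JSON does not choke on them.
def _example_clean_column_names():
    df = pd.DataFrame({"a[0]": [1], 'b:"c"': [2]})
    cleaner = Clean_Colum_Names()
    return cleaner.fit_transform(df)  # columns become "a0" and "bc"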
# __________________________________________________________________________________________________________________________________________________________________________
# Clustering entire data
class Cluster_Entire_Data(BaseEstimator, TransformerMixin):
"""
- Applies kmeans clustering to the entire data set and produce clusters
- Highly recommended to run the DataTypes_Auto_infer class first
Args:
target_variable: target variable (integer or numerical only)
check_clusters_upto: to determine optimum number of kmeans clusters, set the uppler limit of clusters
"""
def __init__(self, target, check_clusters=20, random_state=42):
self.target = target
self.check_clusters = check_clusters + 1
self.random_state = random_state
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, dataset, y=None):
data = dataset
data = data.drop(self.target, axis=1, errors="ignore")
# first convert to dummy
if len(data.select_dtypes(include="object").columns) > 0:
data_t1 = self.dummy.transform(data)
else:
data_t1 = data
# # # now make PLS
# # data_t1 = self.pls.transform(data_t1)
# # data_t1 = self.pca.transform(data_t1)
# # now predict with the clustes
predict = pd.DataFrame(self.k_object.predict(data_t1), index=data.index)
data["data_cluster"] = predict
data["data_cluster"] = data["data_cluster"].astype("object")
if self.target in dataset.columns:
data[self.target] = dataset[self.target]
return data
def fit_transform(self, dataset, y=None):
data = dataset.copy()
# first convert to dummy (if there are objects in data set)
if len(data.select_dtypes(include="object").columns) > 0:
self.dummy = Dummify(self.target)
data_t1 = self.dummy.fit_transform(data)
data_t1 = data_t1.drop(self.target, axis=1)
else:
data_t1 = data.drop(self.target, axis=1)
# now make PLS
# self.pls = PLSRegression(n_components=len(data_t1.columns)-1)
# data_t1 = self.pls.fit_transform(data_t1.drop(self.target,axis=1),data_t1[self.target])[0]
# self.pca = PCA(n_components=len(data_t1.columns)-1)
# data_t1 = self.pca.fit_transform(data_t1.drop(self.target,axis=1))
# we are goign to make a place holder , for 2 to 20 clusters
self.ph = pd.DataFrame(
np.arange(2, self.check_clusters, 1), columns=["clusters"]
)
self.ph["Silhouette"] = float(0)
self.ph["calinski"] = float(0)
# Now start making clusters
for k in self.ph.index:
c = self.ph["clusters"][k]
self.k_object = cluster.KMeans(
n_clusters=c,
init="k-means++",
precompute_distances="auto",
n_init=10,
random_state=self.random_state,
)
self.k_object.fit(data_t1)
self.ph.iloc[k, 1] = metrics.silhouette_score(
data_t1, self.k_object.labels_
)
self.ph.iloc[k, 2] = metrics.calinski_harabasz_score(
data_t1, self.k_object.labels_
)
# now standardize the scores and make a total column
m = MinMaxScaler((-1, 1))
self.ph["calinski"] = m.fit_transform(
np.array(self.ph["calinski"]).reshape(-1, 1)
)
self.ph["Silhouette"] = m.fit_transform(
np.array(self.ph["Silhouette"]).reshape(-1, 1)
)
self.ph["total"] = self.ph["Silhouette"] + self.ph["calinski"]
# sort it by total column and take the first row column 0 , that would represent the optimal clusters
try:
self.clusters = int(
self.ph[self.ph["total"] == max(self.ph["total"])]["clusters"]
)
except: # in case there isn't a decisive measure, take calinski as the yardstick
self.clusters = int(
self.ph[self.ph["calinski"] == max(self.ph["calinski"])]["clusters"]
)
# Now make the final cluster object
self.k_object = cluster.KMeans(
n_clusters=self.clusters,
init="k-means++",
precompute_distances="auto",
n_init=10,
random_state=self.random_state,
)
# now do fit predict
predict = pd.DataFrame(self.k_object.fit_predict(data_t1), index=data.index)
data["data_cluster"] = predict
data["data_cluster"] = data["data_cluster"].astype("object")
if self.target in dataset.columns:
data[self.target] = dataset[self.target]
return data
# __________________________________________________________________________________________________________________________________________
# Clustering catagorical data
class Reduce_Cardinality_with_Clustering(BaseEstimator, TransformerMixin):
"""
- Reduces the level of catagorical column / cardinality through clustering
- Highly recommended to run the DataTypes_Auto_infer class first
Args:
target_variable: target variable (integer or numerical only)
catagorical_feature: list of features on which clustering is to be applied / cardinality to be reduced
check_clusters_upto: to determine optimum number of kmeans clusters, set the uppler limit of clusters
"""
def __init__(
self, target, catagorical_feature=[], check_clusters=30, random_state=42,
):
self.target = target
self.catagorical_feature = catagorical_feature
self.check_clusters = check_clusters + 1
self.random = random_state
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, dataset, y=None):
data = dataset
# we already know which level belongs to which cluster, so all we need is to replace levels with the clusters we already have from the training data set
for i, z in zip(self.catagorical_feature, self.ph_data):
data[i] = data[i].replace(list(z["levels"]), z["cluster"])
return data
def fit_transform(self, dataset, y=None):
data = dataset.copy()
# first convert to dummy
if len(data.select_dtypes(include="object").columns) > 0:
self.dummy = Dummify(self.target)
data_t = self.dummy.fit_transform(
data.drop(self.catagorical_feature, axis=1)
)
# data_t1 = data_t1.drop(self.target,axis=1)
else:
data_t = data.drop(self.catagorical_feature, axis=1)
# now make PLS
self.pls = PLSRegression(
n_components=2
) # since we are only using two componenets to group #PLSRegression(n_components=len(data_t1.columns)-1)
data_pls = self.pls.fit_transform(
data_t.drop(self.target, axis=1), data_t[self.target]
)[0]
# # now we will take one component and then we calculate mean, median, min, max and sd of that one component grouped by the catagorical levels
self.ph_data = []
self.ph_clusters = []
for i in self.catagorical_feature:
data_t1 = pd.DataFrame(
dict(levels=data[i], comp1=data_pls[:, 0], comp2=data_pls[:, 1]),
index=data.index,
)
# now group by feature
data_t1 = data_t1.groupby("levels")
data_t1 = data_t1[["comp1", "comp2"]].agg(
["mean", "median", "min", "max", "std"]
) # this gives us a df with only numeric columns (min , max ) and level as index
# some time if a level has only one record its std will come up as NaN, so convert NaN to 1
data_t1.fillna(1, inplace=True)
# now number of clusters cant be more than the number of samples in aggregated data , so
self.check_clusters = min(self.check_clusters, len(data_t1))
# # we are goign to make a place holder , for 2 to 20 clusters
self.ph = pd.DataFrame(
np.arange(2, self.check_clusters, 1), columns=["clusters"]
)
self.ph["Silhouette"] = float(0)
self.ph["calinski"] = float(0)
# Now start making clusters
for k in self.ph.index:
c = self.ph["clusters"][k]
self.k_object = cluster.KMeans(
n_clusters=c,
init="k-means++",
precompute_distances="auto",
n_init=10,
random_state=self.random,
)
self.k_object.fit(data_t1)
self.ph.iloc[k, 1] = metrics.silhouette_score(
data_t1, self.k_object.labels_
)
self.ph.iloc[k, 2] = metrics.calinski_harabasz_score(
data_t1, self.k_object.labels_
)
# now standardize the scores and make a total column
m = MinMaxScaler((-1, 1))
self.ph["calinski"] = m.fit_transform(
np.array(self.ph["calinski"]).reshape(-1, 1)
)
self.ph["Silhouette"] = m.fit_transform(
np.array(self.ph["Silhouette"]).reshape(-1, 1)
)
self.ph["total"] = self.ph["Silhouette"] + self.ph["calinski"]
# sort it by total column and take the first row column 0 , that would represent the optimal clusters
try:
self.clusters = int(
self.ph[self.ph["total"] == max(self.ph["total"])]["clusters"]
)
except: # in case there isn't a decisive measure, take calinski as the yardstick
self.clusters = int(
self.ph[self.ph["calinski"] == max(self.ph["calinski"])]["clusters"]
)
self.ph_clusters.append(self.ph)
# Now make the final cluster object
self.k_object = cluster.KMeans(
n_clusters=self.clusters,
init="k-means++",
precompute_distances="auto",
n_init=10,
random_state=self.random,
)
# now do fit predict
predict = self.k_object.fit_predict(data_t1)
# put it back with the group by aggregate columns
data_t1["cluster"] = predict
data_t1["cluster"] = data_t1["cluster"].apply(str)
# now we dont need all the columns, only the cluster column is required along with the index (index also has a name , we groupy as "levels")
data_t1 = data_t1[["cluster"]]
# now convert the index to a column
data_t1.reset_index(
level=0, inplace=True
) # this table now only contains every level and its cluster
# self.data_t1= data_t1
# we can now replace cluster with the original level in the original data frame
data[i] = data[i].replace(list(data_t1["levels"]), data_t1["cluster"])
self.ph_data.append(data_t1)
if self.target in dataset.columns:
data[self.target] = dataset[self.target]
return data
# ____________________________________________________________________________________________________________________________________________
# Clustering catagorical data
class Reduce_Cardinality_with_Counts(BaseEstimator, TransformerMixin):
"""
- Reduces the level of catagorical column by replacing levels with their count & converting objects into float
Args:
catagorical_feature: list of features on which clustering is to be applied
"""
def __init__(self, catagorical_feature=[], float_dtype="float32"):
self.catagorical_feature = catagorical_feature
self.float_dtype = float_dtype
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, dataset, y=None):
data = dataset
# we already know level counts
for i, z, k in zip(self.catagorical_feature, self.ph_data, self.ph_u):
data[i] = data[i].replace(k, z["counts"])
data[i] = data[i].astype(self.float_dtype)
return data
def fit_transform(self, dataset, y=None):
data = dataset
#
self.ph_data = []
self.ph_u = []
for i in self.catagorical_feature:
data_t1 = pd.DataFrame(
dict(
levels=data[i].groupby(data[i], sort=False).count().index,
counts=data[i].groupby(data[i], sort=False).count().values,
)
)
u = data[i].unique()
# replace levels with counts
data[i].replace(u, data_t1["counts"], inplace=True)
data[i] = data[i].astype(self.float_dtype)
self.ph_data.append(data_t1)
self.ph_u.append(u)
return data
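# Sketch with an assumed high-cardinality column: every level is replaced by
# its frequency in the training data and the column becomes float.
def _example_reduce_cardinality_with_counts():
    df = pd.DataFrame({"zip": ["111", "111", "222", "333", "111"]})
    reducer = Reduce_Cardinality_with_Counts(catagorical_feature=["zip"])
    return reducer.fit_transform(df)  # zip -> [3.0, 3.0, 1.0, 1.0, 3.0] as float32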
# ____________________________________________________________________________________________________________________________________________
# take nonlinear transformations
class Make_NonLiner_Features(BaseEstimator, TransformerMixin):
"""
- convert numerical features into polynomial features
- it is HIGHLY recommended to run the Autoinfer_Data_Type class first
- Ignores target variable
- it picks up data type float32 as numerical
- for multiclass classification problem , set subclass arg to 'multi'
Args:
target: string , name of the target variable
Polynomial_degree: int ,default 2
"""
def __init__(
self,
target,
ml_usecase="classification",
polynomial_degree=2,
other_nonliner_features=["sin", "cos", "tan"],
top_features_to_pick=0.20,
random_state=42,
subclass="ignore",
n_jobs=1,
float_dtype="float32",
):
self.target = target
self.polynomial_degree = polynomial_degree
self.ml_usecase = ml_usecase
self.other_nonliner_features = other_nonliner_features
self.top_features_to_pick = top_features_to_pick
self.random_state = random_state
self.subclass = subclass
self.n_jobs = n_jobs
self.float_dtype = float_dtype
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, dataset, y=None): # same application for test and train
data = dataset
self.numeric_columns = (
data.drop(self.target, axis=1, errors="ignore")
.select_dtypes(include=self.float_dtype)
.columns
)
if self.polynomial_degree >= 2: # don't run anything if the power is less than 2
# self.numeric_columns = data.drop(self.target,axis=1,errors='ignore').select_dtypes(include="float32").columns
# start taking powers
for i in range(2, self.polynomial_degree + 1):
ddc_power = np.power(data[self.numeric_columns], i)
ddc_col = list(ddc_power.columns)
ii = str(i)
ddc_col = [ddc_col + "_Power" + ii for ddc_col in ddc_col]
ddc_power.columns = ddc_col
# put it back with data dummy
# data = pd.concat((data,ddc_power),axis=1)
else:
ddc_power = pd.DataFrame()
# take sin:
if "sin" in self.other_nonliner_features:
ddc_sin = np.sin(data[self.numeric_columns])
ddc_col = list(ddc_sin.columns)
ddc_col = ["sin(" + i + ")" for i in ddc_col]
ddc_sin.columns = ddc_col
# put it back with data dummy
# data = pd.concat((data,ddc_sin),axis=1)
else:
ddc_sin = pd.DataFrame()
# take cos:
if "cos" in self.other_nonliner_features:
ddc_cos = np.cos(data[self.numeric_columns])
ddc_col = list(ddc_cos.columns)
ddc_col = ["cos(" + i + ")" for i in ddc_col]
ddc_cos.columns = ddc_col
# put it back with data dummy
# data = pd.concat((data,ddc_cos),axis=1)
else:
ddc_cos = pd.DataFrame()
# take tan:
if "tan" in self.other_nonliner_features:
ddc_tan = np.tan(data[self.numeric_columns])
ddc_col = list(ddc_tan.columns)
ddc_col = ["tan(" + i + ")" for i in ddc_col]
ddc_tan.columns = ddc_col
# put it back with data dummy
# data = pd.concat((data,ddc_tan),axis=1)
else:
ddc_tan = pd.DataFrame()
# dummy_all
dummy_all = pd.concat((data, ddc_power, ddc_sin, ddc_cos, ddc_tan), axis=1)
# we can select top features using RF
# # and we only want to do this if the dummy all have more than 50 features
# if len(dummy_all.columns) > 71:
dummy_all = dummy_all[self.columns_to_keep]
if self.target in dataset.columns:
dummy_all[self.target] = dataset[self.target]
return dummy_all
def fit_transform(self, dataset, y=None):
data = dataset
self.numeric_columns = (
data.drop(self.target, axis=1, errors="ignore")
.select_dtypes(include=self.float_dtype)
.columns
)
if self.polynomial_degree >= 2: # don't run anything if the power is less than 2
# self.numeric_columns = data.drop(self.target,axis=1,errors='ignore').select_dtypes(include="float32").columns
# start taking powers
for i in range(2, self.polynomial_degree + 1):
ddc_power = np.power(data[self.numeric_columns], i)
ddc_col = list(ddc_power.columns)
ii = str(i)
ddc_col = [ddc_col + "_Power" + ii for ddc_col in ddc_col]
ddc_power.columns = ddc_col
# put it back with data dummy
# data = pd.concat((data,ddc_power),axis=1)
else:
ddc_power = pd.DataFrame()
# take sin:
if "sin" in self.other_nonliner_features:
ddc_sin = np.sin(data[self.numeric_columns])
ddc_col = list(ddc_sin.columns)
ddc_col = ["sin(" + i + ")" for i in ddc_col]
ddc_sin.columns = ddc_col
# put it back with data dummy
# data = pd.concat((data,ddc_sin),axis=1)
else:
ddc_sin = pd.DataFrame()
# take cos:
if "cos" in self.other_nonliner_features:
ddc_cos = np.cos(data[self.numeric_columns])
ddc_col = list(ddc_cos.columns)
ddc_col = ["cos(" + i + ")" for i in ddc_col]
ddc_cos.columns = ddc_col
# put it back with data dummy
# data = pd.concat((data,ddc_cos),axis=1)
else:
ddc_cos = pd.DataFrame()
# take tan:
if "tan" in self.other_nonliner_features:
ddc_tan = np.tan(data[self.numeric_columns])
ddc_col = list(ddc_tan.columns)
ddc_col = ["tan(" + i + ")" for i in ddc_col]
ddc_tan.columns = ddc_col
# put it back with data dummy
# data = pd.concat((data,ddc_tan),axis=1)
else:
ddc_tan = pd.DataFrame()
# dummy_all
dummy_all = pd.concat(
(data[[self.target]], ddc_power, ddc_sin, ddc_cos, ddc_tan), axis=1
)
# we can select top features using our Feature Selection Classic transformer
afs = Advanced_Feature_Selection_Classic(
target=self.target,
ml_usecase=self.ml_usecase,
top_features_to_pick=self.top_features_to_pick,
random_state=self.random_state,
subclass=self.subclass,
n_jobs=self.n_jobs,
)
dummy_all_t = afs.fit_transform(dummy_all)
data = pd.concat((data, dummy_all_t), axis=1)
# # making sure no duplicated columns are there
data = data.loc[:, ~data.columns.duplicated()]
self.columns_to_keep = data.drop(self.target, axis=1).columns
return data
# ______________________________________________________________________________________________________________________________________________________
# Feature Selection
class Advanced_Feature_Selection_Classic(BaseEstimator, TransformerMixin):
"""
- Selects important features and reduces the feature space. Feature selection is based on Random Forest , Light GBM and Correlation
- to run on multiclass classification , set the subclass argument to 'multi'
"""
def __init__(
self,
target,
ml_usecase="classification",
top_features_to_pick=0.10,
random_state=42,
subclass="ignore",
n_jobs=1,
):
self.target = target
self.ml_usecase = ml_usecase
self.top_features_to_pick = 1 - top_features_to_pick
self.random_state = random_state
self.subclass = subclass
self.n_jobs = n_jobs
def fit(self, dataset, y=None):
self.fit_transform(dataset, y=y)
return self
def transform(self, dataset, y=None):
# return the data with only specific columns
data = dataset
# self.selected_columns.remove(self.target)
data = data[self.selected_columns_test]
if self.target in dataset.columns:
data[self.target] = dataset[self.target]
return data
def fit_transform(self, dataset, y=None):
dummy_all = dataset.copy()
dummy_all[self.target] = dummy_all[self.target].astype("float32")
# Random Forest
max_fe = min(70, int(np.sqrt(len(dummy_all.columns))))
max_sa = min(1000, int(np.sqrt(len(dummy_all))))
if self.ml_usecase == "classification":
m = rfc(
100,
max_depth=5,
max_features=max_fe,
n_jobs=self.n_jobs,
max_samples=max_sa,
random_state=self.random_state,
)
else:
m = rfr(
100,
max_depth=5,
max_features=max_fe,
n_jobs=self.n_jobs,
max_samples=max_sa,
random_state=self.random_state,
)
m.fit(dummy_all.drop(self.target, axis=1), dummy_all[self.target])
# self.fe_imp_table= pd.DataFrame(m.feature_importances_,columns=['Importance'],index=dummy_all.drop(self.target,axis=1).columns).sort_values(by='Importance',ascending= False)
self.fe_imp_table = pd.DataFrame(
m.feature_importances_,
columns=["Importance"],
index=dummy_all.drop(self.target, axis=1).columns,
)
self.fe_imp_table = self.fe_imp_table[
self.fe_imp_table["Importance"]
>= self.fe_imp_table.quantile(self.top_features_to_pick)[0]
]
top = self.fe_imp_table.index
dummy_all_columns_RF = dummy_all[top].columns
# LightGBM
max_fe = min(70, int(np.sqrt(len(dummy_all.columns))))
max_sa = min(
float(1000 / len(dummy_all)),
float(np.sqrt(len(dummy_all) / len(dummy_all))),
)
if self.ml_usecase == "classification":
m = lgbmc(
n_estimators=100,
max_depth=5,
n_jobs=self.n_jobs,
subsample=max_sa,
random_state=self.random_state,
)
else:
m = lgbmr(
n_estimators=100,
max_depth=5,
n_jobs=self.n_jobs,
subsample=max_sa,
random_state=self.random_state,
)
m.fit(dummy_all.drop(self.target, axis=1), dummy_all[self.target])
# self.fe_imp_table= pd.DataFrame(m.feature_importances_,columns=['Importance'],index=dummy_all.drop(self.target,axis=1).columns).sort_values(by='Importance',ascending= False)
self.fe_imp_table = pd.DataFrame(
m.feature_importances_,
columns=["Importance"],
index=dummy_all.drop(self.target, axis=1).columns,
)
self.fe_imp_table = self.fe_imp_table[
self.fe_imp_table["Importance"]
>= self.fe_imp_table.quantile(self.top_features_to_pick)[0]
]
top = self.fe_imp_table.index
dummy_all_columns_LGBM = dummy_all[top].columns
# we can now select top correlated feature
if self.subclass != "multi":
corr = pd.DataFrame(np.corrcoef(dummy_all.T))
corr.columns = dummy_all.columns
corr.index = dummy_all.columns
# corr = corr[self.target].abs().sort_values(ascending=False)[0:self.top_features_to_pick+1]
corr = corr[self.target].abs()
corr = corr[corr.index != self.target] # drop the target column
corr = corr[corr >= corr.quantile(self.top_features_to_pick)]
corr = pd.DataFrame(dict(features=corr.index, value=corr)).reset_index(
drop=True
)
corr = corr.drop_duplicates(subset="value")
corr = corr["features"]
# corr = pd.DataFrame(dict(features=corr.index,value=corr)).reset_index(drop=True)
# corr = corr.drop_duplicates(subset='value')[0:self.top_features_to_pick+1]
# corr = corr['features']
else:
corr = list()
self.dummy_all_columns_RF = dummy_all_columns_RF
self.dummy_all_columns_LGBM = dummy_all_columns_LGBM
self.corr = corr
self.selected_columns = list(
set(
[self.target]
+ list(dummy_all_columns_RF)
+ list(corr)
+ list(dummy_all_columns_LGBM)
)
)
self.selected_columns_test = (
dataset[self.selected_columns].drop(self.target, axis=1).columns
)
return dataset[self.selected_columns]
# ______________________________________________________________________________________________________________________________________________________
# Boruta Feature Selection algorithm
# Base on: https://github.com/scikit-learn-contrib/boruta_py/blob/master/boruta/boruta_py.py
class Boruta_Feature_Selection(BaseEstimator, TransformerMixin):
"""
Boruta selection algorithm based on borutaPy sklearn-contrib and
<NAME>, https://m2.icm.edu.pl/boruta/
Selects the most important features.
Args:
target (str): target column name
ml_usecase (str): case: classification or regression
top_features_to_pick: to make...
max_iteration {int): overall iterations of shuffle and train forests
alpha {float): p-value on which
the option to favour one measur to another. e.g. if value is .6 , during feature selection tug of war, correlation target measure will have a higher say.
A value of .5 means both measure have equal say
"""
def __init__(
self,
target,
ml_usecase="classification",
top_features_to_pick=1.0,
max_iteration=200,
n_iter_no_change=25,
alpha=0.05,
random_state=42,
subclass="ignore",
n_jobs=1,
):
self.target = target
self.ml_usecase = ml_usecase
self.top_features_to_pick = top_features_to_pick
self.random_state = random_state
self.subclass = subclass
self.max_iteration = max_iteration
self.n_iter_no_change = n_iter_no_change
self.alpha = alpha
self.selected_columns_test = []
self.n_jobs = n_jobs
@property
def selected_columns(self):
return self.selected_columns_test + [self.target]
def fit(self, dataset, y=None):
from .patches.boruta_py import BorutaPyPatched
dummy_data = dataset
X, y = dummy_data.drop(self.target, axis=1), dummy_data[self.target].values
y = y.astype("float32")
X_cols = X.columns
X = X.values
if self.ml_usecase == "classification":
m = rfc(
100,
max_depth=5,
n_jobs=self.n_jobs,
random_state=self.random_state,
class_weight="balanced",
)
else:
m = rfr(
100, max_depth=5, n_jobs=self.n_jobs, random_state=self.random_state,
)
feat_selector = BorutaPyPatched(
m,
n_estimators="auto",
perc=int(self.top_features_to_pick * 100),
max_iter=self.max_iteration,
random_state=self.random_state,
early_stopping=(self.n_iter_no_change > 0),
n_iter_no_change=self.n_iter_no_change,
)
try:
feat_selector.fit(X, y)
self.selected_columns_test = list(X_cols[feat_selector.support_])
except:
            # boruta may error out if all features are selected
self.selected_columns_test = list(X_cols)
return self
def transform(self, dataset, y=None):
if self.target in dataset.columns:
return dataset[self.selected_columns]
else:
return dataset[self.selected_columns_test]
def fit_transform(self, dataset, y=None):
self.fit(dataset, y=y)
return self.transform(dataset, y=y)
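# Usage sketch (illustrative only; the DataFrame, column names and target below
# are hypothetical and not part of this module):
#
#   import numpy as np, pandas as pd
#   rng = np.random.RandomState(0)
#   df = pd.DataFrame(rng.rand(200, 4), columns=["f1", "f2", "f3", "f4"])
#   df["label"] = (df["f1"] > 0.5).astype(int)
#   selector = Boruta_Feature_Selection(target="label", ml_usecase="classification")
#   reduced = selector.fit_transform(df)   # confirmed features + the target column
#   selector.transform(df)                 # reuse the fitted selection on new data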
# _________________________________________________________________________________________________________________________________________
class Fix_multicollinearity(BaseEstimator, TransformerMixin):
"""
    Fixes multicollinearity between predictor variables, also considering each variable's correlation with the target.
    Only applies to regression or two-class classification use cases.
    Takes numerical and one-hot encoded variables only.
    A worked illustration of the ranking rule follows the class definition.
    Args:
        threshold (float): the maximum absolute Pearson correlation tolerated between features, from 0.0 to 1.0
        target_variable (str): the target variable/column name
        correlation_with_target_threshold (float): minimum absolute correlation required between every feature and the target variable, default 0.0 (0.0 to 1.0)
        correlation_with_target_preference (float): 0.0 to 1.0, default 1.0; while choosing between a pair of collinear features, this weights the
            target-correlation measure against the average-multicollinearity measure. E.g. a value of 0.6 gives the correlation-with-target measure a
            higher say in the feature-selection tug of war, while 0.5 weights both measures equally.
"""
    # make a constructor
def __init__(
self,
threshold,
target_variable,
correlation_with_target_threshold=0.0,
correlation_with_target_preference=1.0,
):
self.threshold = threshold
self.target_variable = target_variable
self.correlation_with_target_threshold = correlation_with_target_threshold
self.correlation_with_target_preference = correlation_with_target_preference
self.target_corr_weight = correlation_with_target_preference
self.multicol_weight = 1 - correlation_with_target_preference
# Make fit method
def fit(self, data, y=None):
"""
Args:
data = takes preprocessed data frame
Returns:
None
"""
if data[self.target_variable].dtype not in ["int32", "int64", "float32", "float64"]:
raise ValueError('dtype for the target variable should be int32, int64, float32, or float64 only')
# global data1
data1 = data.select_dtypes(include=["int32", "int64", "float32", "float64"])
# try:
# self.data1 = self.data1.astype('float16')
# except:
# None
        # build a correlation matrix and take its absolute values
# self.data_c = self.data1.T.drop_duplicates()
# self.data1 = self.data_c.T
corr = pd.DataFrame(np.corrcoef(data1.T))
corr.columns = data1.columns
corr.index = data1.columns
# corr_matrix = abs(data1.corr())
corr_matrix = abs(corr)
        # for every diagonal value, make it NaN
corr_matrix.values[
tuple([np.arange(corr_matrix.shape[0])] * 2)
] = np.NaN
        # Now calculate the average correlation of every feature with the others, and get a pandas data frame
avg_cor = pd.DataFrame(corr_matrix.mean())
avg_cor["feature"] = avg_cor.index
avg_cor.reset_index(drop=True, inplace=True)
avg_cor.columns = ["avg_cor", "features"]
# Calculate the correlation with the target
targ_cor = pd.DataFrame(corr_matrix[self.target_variable].dropna())
targ_cor["feature"] = targ_cor.index
targ_cor.reset_index(drop=True, inplace=True)
targ_cor.columns = ["target_variable", "features"]
# Now, add a column for variable name and drop index
corr_matrix["column"] = corr_matrix.index
corr_matrix.reset_index(drop=True, inplace=True)
        # now we need to melt it, so that we get pairwise correlations in two columns
cols = corr_matrix.column
melt = (
corr_matrix.melt(id_vars=["column"], value_vars=cols)
.sort_values(by="value", ascending=False)
.dropna()
)
# now bring in the avg correlation for first of the pair
merge = pd.merge(
melt, avg_cor, left_on="column", right_on="features"
).drop("features", axis=1)
# now bring in the avg correlation for second of the pair
merge = pd.merge(
merge, avg_cor, left_on="variable", right_on="features"
).drop("features", axis=1)
# now bring in the target correlation for first of the pair
merge = pd.merge(
merge, targ_cor, left_on="column", right_on="features"
).drop("features", axis=1)
# now bring in the avg correlation for second of the pair
merge = pd.merge(
merge, targ_cor, left_on="variable", right_on="features"
).drop("features", axis=1)
# sort and save
merge = merge.sort_values(by="value", ascending=False)
        # we now need to eliminate all the pairs that are actually duplicates, e.g. cor(x,y) = cor(y,x); they are the same, so we need to find these and drop them
merge["all_columns"] = merge["column"] + merge["variable"]
        # this puts all the corresponding pairs of features together, so that we can take only one, since they are just duplicates
merge["all_columns"] = [sorted(i) for i in merge["all_columns"]]
# now sort by new column
merge = merge.sort_values(by="all_columns")
        # take every second row
merge = merge.iloc[::2, :]
# make a ranking column to eliminate features
merge["rank_x"] = round(
self.multicol_weight * (merge["avg_cor_y"] - merge["avg_cor_x"])
+ self.target_corr_weight
* (merge["target_variable_x"] - merge["target_variable_y"]),
6,
) # round it to 6 digits
        ## Now there will be rows where the rank is exactly zero; this is where the value (correlation between features) is exactly one (like price and price^2)
        ## so in that case, we can simply pick one of the two variables
        # but since features can be in either column, we will drop a feature from 'column' only if it is not also present in the 'variable' column
        # both equations below will return the list of columns to drop from here
        # this is how it goes
        ## For the portion where correlation is exactly one!
one = merge[merge["rank_x"] == 0]
        # this portion is complicated
        # table `one` has all the paired variables with a correlation of 1
        # in a nutshell, we can keep any column (one side of a pair) and delete the other column (the other side of the pair)
        # however one variable can appear more than once on either side, so we will run a for loop to find all pairs...
        # here it goes
        # take a list of all (unique) variables that have correlation 1 with each other; we will make two copies
u_all = list(
pd.unique(pd.concat((one["column"], one["variable"]), axis=0))
)
u_all_1 = list(
pd.unique(pd.concat((one["column"], one["variable"]), axis=0))
)
# take a list of features (unique) for the first side of the pair
u_column = pd.unique(one["column"])
        # now we are going to pick each variable from one column (one side of the pair), check it against the other column (the other side of the pair)
        # to pull all corresponding / paired variables, and delete those variable names from the full unique list
for i in u_column:
# print(i)
r = one[one["column"] == i]["variable"]
for q in r:
if q in u_all:
# print("_"+q)
u_all.remove(q)
        # now the unique list contains the variables that should remain, so in order to get the variables that should be deleted:
to_drop = list(set(u_all_1) - set(u_all))
# to_drop_a =(list(set(one['column'])-set(one['variable'])))
# to_drop_b =(list(set(one['variable'])-set(one['column'])))
# to_drop = to_drop_a + to_drop_b
        ## now we treat the cases where rank is not zero and value (correlation) is greater than a specific threshold
non_zero = merge[
(merge["rank_x"] != 0.0) & (merge["value"] >= self.threshold)
]
# pick the column to delete
non_zero_list = list(
np.where(
non_zero["rank_x"] < 0,
non_zero["column"],
non_zero["variable"],
)
)
# add two list
self.to_drop = to_drop + non_zero_list
# make sure that target column is not a part of the list
try:
self.to_drop.remove(self.target_variable)
except:
pass
        # now we want to keep only the columns whose correlation with the target is above a threshold
self.to_drop_taret_correlation = []
if self.correlation_with_target_threshold != 0.0:
corr = pd.DataFrame(
np.corrcoef(data.drop(self.to_drop, axis=1).T),
columns=data.drop(self.to_drop, axis=1).columns,
index=data.drop(self.to_drop, axis=1).columns,
)
self.to_drop_taret_correlation = corr[self.target_variable].abs()
# to_drop_taret_correlation = data.drop(self.to_drop,axis=1).corr()[target_variable].abs()
self.to_drop_taret_correlation = self.to_drop_taret_correlation[
self.to_drop_taret_correlation < self.correlation_with_target_threshold
]
self.to_drop_taret_correlation = list(self.to_drop_taret_correlation.index)
# to_drop = corr + to_drop
try:
self.to_drop_taret_correlation.remove(self.target_variable)
except:
pass
return self
# now Transform
def transform(self, dataset, y=None):
"""
        Args:
data = takes preprocessed data frame
Returns:
data frame
"""
data = dataset
data = data.drop(self.to_drop, axis=1)
# now drop less correlated data
data.drop(self.to_drop_taret_correlation, axis=1, inplace=True, errors="ignore")
return data
# fit_transform
def fit_transform(self, data, y=None):
"""
Args:
data = takes preprocessed data frame
Returns:
data frame
"""
self.fit(data)
return self.transform(data)
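# Worked illustration of the rank_x rule used in fit() (made-up numbers):
#   for a highly correlated pair (A = 'column', B = 'variable') with
#   avg_cor_A = 0.70, avg_cor_B = 0.60, corr(A, target) = 0.30, corr(B, target) = 0.50
#   and multicol_weight = target_corr_weight = 0.5:
#     rank_x = 0.5 * (0.60 - 0.70) + 0.5 * (0.30 - 0.50) = -0.15 < 0  ->  drop A,
#   i.e. the feature with the higher average multicollinearity and the lower
#   correlation with the target is the one removed.
#
# Usage sketch (hypothetical data):
#   fixer = Fix_multicollinearity(threshold=0.9, target_variable="label")
#   cleaned = fixer.fit_transform(df)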
# ____________________________________________________________________________________________________________________________________________________________________
# handle perfect multicollinearity
class Remove_100(BaseEstimator, TransformerMixin):
"""
    - Takes a DF, returns the data frame after removing features that are perfectly correlated (dropping one of each pair)
    - A usage sketch follows the class definition.
"""
def __init__(self, target):
self.target = target
self.columns_to_drop = []
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, dataset, y=None):
return dataset.drop(self.columns_to_drop, axis=1)
def fit_transform(self, dataset, y=None):
data = dataset
targetless_data = data.drop(self.target, axis=1)
# correlation should be calculated between at least two features, if there is only 1, there is nothing to delete
if len(targetless_data.columns) <= 1:
return data
corr = pd.DataFrame(np.corrcoef(targetless_data.T))
corr.columns = targetless_data.columns
corr.index = targetless_data.columns
corr_matrix = abs(corr)
# Now, add a column for variable name and drop index
corr_matrix["column"] = corr_matrix.index
corr_matrix.reset_index(drop=True, inplace=True)
        # now we need to melt it, so that we get pairwise correlations in two columns
cols = corr_matrix.column
melt = corr_matrix.melt(id_vars=["column"], value_vars=cols).sort_values(
by="value", ascending=False
) # .dropna()
melt["value"] = round(melt["value"], 2) # round it to two digits
        # now pick variables where value is one and 'column' != 'variable' (the two columns are not the same)
c1 = melt["value"] == 1.00
c2 = melt["column"] != melt["variable"]
melt = melt[((c1 == True) & (c2 == True))]
        # we now need to eliminate all the pairs that are actually duplicates, e.g. cor(x,y) = cor(y,x); they are the same, so we need to find these and drop them
melt["all_columns"] = melt["column"] + melt["variable"]
        # this puts all the corresponding pairs of features together, so that we can take only one, since they are just duplicates
melt["all_columns"] = [sorted(i) for i in melt["all_columns"]]
# # now sort by new column
melt = melt.sort_values(by="all_columns")
        # # take every second row
melt = melt.iloc[::2, :]
# lets keep the columns on the left hand side of the table
self.columns_to_drop = melt["variable"]
return data.drop(self.columns_to_drop, axis=1)
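# Usage sketch (hypothetical data with a perfectly correlated duplicate column):
#   df = pd.DataFrame({"x": [1, 2, 3, 4], "x2": [2, 4, 6, 8], "y": [0, 1, 0, 1]})
#   Remove_100(target="y").fit_transform(df)   # one of "x" / "x2" is dropped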
# _______________________________________________________________________________________________________________________________________________________________________________________________
# custom DFS
class DFS_Classic(BaseEstimator, TransformerMixin):
"""
    - Automated feature interactions using multiplication, division, addition & subtraction
    - Only accepts numeric / one-hot encoded features
    - Takes a DF, returns a DF
    - For multiclass classification problems, set the subclass arg to 'multi'
"""
def __init__(
self,
target,
ml_usecase="classification",
interactions=["multiply", "divide", "add", "subtract"],
top_n_correlated=0.05,
random_state=42,
subclass="ignore",
n_jobs=1,
):
self.target = target
self.interactions = interactions
self.top_n_correlated = top_n_correlated # (this will be 1- top_features , but handled in the Advance_feature_selection )
self.ml_usecase = ml_usecase
self.random_state = random_state
self.subclass = subclass
self.n_jobs = n_jobs
def fit(self, data, y=None):
self.fit_transform(data, y=y)
return self
def transform(self, dataset, y=None):
data = dataset
data_without_target = data.drop(self.target, axis=1, errors="ignore")
# for multiplication:
        # we need both categorical and numerical columns
if "multiply" in self.interactions:
data_multiply = pd.concat(
[
data_without_target.mul(col[1], axis="index")
for col in data_without_target.iteritems()
],
axis=1,
)
data_multiply.columns = [
"_multiply_".join([i, j])
for j in data_without_target.columns
for i in data_without_target.columns
]
            # we don't need to apply the rest of the conditions
data_multiply.index = data.index
else:
data_multiply = pd.DataFrame()
# for division, we only want it to apply to numerical columns
if "divide" in self.interactions:
data_divide = pd.concat(
[
data_without_target[self.numeric_columns].div(col[1], axis="index")
for col in data_without_target[self.numeric_columns].iteritems()
],
axis=1,
)
data_divide.columns = [
"_divide_".join([i, j])
for j in data_without_target[self.numeric_columns].columns
for i in data_without_target[self.numeric_columns].columns
]
data_divide.replace([np.inf, -np.inf], 0, inplace=True)
data_divide.fillna(0, inplace=True)
data_divide.index = data.index
else:
data_divide = pd.DataFrame()
# for addition, we only want it to apply to numerical columns
if "add" in self.interactions:
data_add = pd.concat(
[
data_without_target[self.numeric_columns].add(col[1], axis="index")
for col in data_without_target[self.numeric_columns].iteritems()
],
axis=1,
)
data_add.columns = [
"_add_".join([i, j])
for j in data_without_target[self.numeric_columns].columns
for i in data_without_target[self.numeric_columns].columns
]
data_add.index = data.index
else:
data_add = pd.DataFrame()
# for substraction, we only want it to apply to numerical columns
if "subtract" in self.interactions:
data_substract = pd.concat(
[
data_without_target[self.numeric_columns].sub(col[1], axis="index")
for col in data_without_target[self.numeric_columns].iteritems()
],
axis=1,
)
data_substract.columns = [
"_subtract_".join([i, j])
for j in data_without_target[self.numeric_columns].columns
for i in data_without_target[self.numeric_columns].columns
]
data_substract.index = data.index
else:
data_substract = pd.DataFrame()
# get all the dummy data combined
dummy_all = pd.concat(
(data, data_multiply, data_divide, data_add, data_substract), axis=1
)
del data_multiply
del data_divide
del data_add
del data_substract
# now only return the columns we want:
dummy_all = dummy_all[self.columns_to_keep]
if self.target in dataset.columns:
dummy_all[self.target] = dataset[self.target]
return dummy_all
def fit_transform(self, dataset, y=None):
data = dataset
data_without_target = data.drop(self.target, axis=1, errors="ignore")
        # we need to separate numerical and one-hot encoded columns
# self.ohe_columns = [i if ((len(data[i].unique())==2) & (data[i].unique()[0] in [0,1]) & (data[i].unique()[1] in [0,1]) ) else None for i in data.drop(self.target,axis=1).columns]
self.ohe_columns = [
i
for i in data.columns
if data[i].nunique() == 2
and data[i].unique()[0] in [0, 1]
and data[i].unique()[1] in [0, 1]
]
# self.ohe_columns = [i for i in self.ohe_columns if i is not None]
self.numeric_columns = [
i for i in data_without_target.columns if i not in self.ohe_columns
]
target_variable = data[[self.target]]
# for multiplication:
        # we need both categorical and numerical columns
if "multiply" in self.interactions:
data_multiply = pd.concat(
[
data_without_target.mul(col[1], axis="index")
for col in data_without_target.iteritems()
],
axis=1,
)
data_multiply.columns = [
"_multiply_".join([i, j])
for j in data_without_target.columns
for i in data_without_target.columns
]
            # we don't need columns that are self-interactions
col = [
"_multiply_".join([i, j])
for j in data_without_target.columns
for i in data_without_target.columns
if i != j
]
data_multiply = data_multiply[col]
            # we don't need columns whose total sum is zero (two categorical variables that never occur together)
col1 = [
i for i in data_multiply.columns if np.nansum(data_multiply[i]) != 0
]
data_multiply = data_multiply[col1]
data_multiply.index = data.index
else:
data_multiply = pd.DataFrame()
# for division, we only want it to apply to numerical columns
if "divide" in self.interactions:
data_divide = pd.concat(
[
data_without_target[self.numeric_columns].div(col[1], axis="index")
for col in data_without_target[self.numeric_columns].iteritems()
],
axis=1,
)
data_divide.columns = [
"_divide_".join([i, j])
for j in data_without_target[self.numeric_columns].columns
for i in data_without_target[self.numeric_columns].columns
]
            # we don't need columns that are self-interactions
col = [
"_divide_".join([i, j])
for j in data_without_target[self.numeric_columns].columns
for i in data_without_target[self.numeric_columns].columns
if i != j
]
data_divide = data_divide[col]
            # we don't need columns whose total sum is zero (two categorical variables that never occur together)
col1 = [i for i in data_divide.columns if np.nansum(data_divide[i]) != 0]
data_divide = data_divide[col1]
            # additionally we need to fill all the possible NaNs
data_divide.replace([np.inf, -np.inf], 0, inplace=True)
data_divide.fillna(0, inplace=True)
data_divide.index = data.index
else:
data_divide = pd.DataFrame()
# for addition, we only want it to apply to numerical columns
if "add" in self.interactions:
data_add = pd.concat(
[
data_without_target[self.numeric_columns].add(col[1], axis="index")
for col in data_without_target[self.numeric_columns].iteritems()
],
axis=1,
)
data_add.columns = [
"_add_".join([i, j])
for j in data_without_target[self.numeric_columns].columns
for i in data_without_target[self.numeric_columns].columns
]
            # we don't need columns that are self-interactions
col = [
"_add_".join([i, j])
for j in data_without_target[self.numeric_columns].columns
for i in data_without_target[self.numeric_columns].columns
if i != j
]
data_add = data_add[col]
            # we don't need columns whose total sum is zero (two categorical variables that never occur together)
col1 = [i for i in data_add.columns if np.nansum(data_add[i]) != 0]
data_add = data_add[col1]
data_add.index = data.index
else:
data_add = pd.DataFrame()
# for substraction, we only want it to apply to numerical columns
if "subtract" in self.interactions:
data_substract = pd.concat(
[
data_without_target[self.numeric_columns].sub(col[1], axis="index")
for col in data_without_target[self.numeric_columns].iteritems()
],
axis=1,
)
data_substract.columns = [
"_subtract_".join([i, j])
for j in data_without_target[self.numeric_columns].columns
for i in data_without_target[self.numeric_columns].columns
]
            # we don't need columns that are self-interactions
col = [
"_subtract_".join([i, j])
for j in data_without_target[self.numeric_columns].columns
for i in data_without_target[self.numeric_columns].columns
if i != j
]
data_substract = data_substract[col]
            # we don't need columns whose total sum is zero (two categorical variables that never occur together)
col1 = [
i for i in data_substract.columns if np.nansum(data_substract[i]) != 0
]
data_substract = data_substract[col1]
data_substract.index = data.index
else:
            data_substract = pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 12 11:57:29 2021
@author: ml
"""
from code.feature_extraction.weekday import Weekday
import numpy as np
import pandas as pd
import unittest
class WeekdayTest(unittest.TestCase):
def setUp(self):
self.INPUT_COLUMN = "input"
self.Weekday_feature = Weekday(self.INPUT_COLUMN)
        self.df = pd.DataFrame()
import pathlib
import re
import tifffile
import pandas as pd
def get_wh(fn):
with tifffile.TiffFile(str(fn)) as im:
w, h = reversed(im.asarray().shape[:2])
return w, h
if __name__ == '__main__':
sm = snakemake
re_basic = re.compile(sm.params.re_basic)
re_crop = re.compile(sm.params.re_crop)
re_suffix = re.compile(sm.params.re_suffix)
fol_labs = pathlib.Path(sm.input.fol_labels)
file_dict = {fp.name: re_basic.match(fp.name).groupdict()
for fp in fol_labs.glob('*_label.tiff')}
for fn, dic in file_dict.items():
crop_match = re_crop.match(dic['cropname'])
if crop_match is not None:
dic.update(**crop_match.groupdict())
suffix_match = re_suffix.match(dic['suffix'])
if suffix_match is not None:
dic.update(**suffix_match.groupdict())
dic['filename'] = fn
if (dic.get('w', None) is None
or dic.get('h', None) is None):
dic['w'], dic['h'] = get_wh(fol_labs / fn)
fn_manual_coordinates = pathlib.Path(sm.params.fn_manual_coordinates)
dat_crops = pd.DataFrame(file_dict).T
dat_crops['use'] = 1
if fn_manual_coordinates.exists():
        dat_m_crops = pd.read_csv(fn_manual_coordinates)
import sys
import requests
import pandas as pd
from datetime import datetime
extraction_date = datetime.today().strftime("%Y%m%d_%H%M%S")
HEADERS = {
"accept": "application/json; charset=utf-8",
"accept-encoding": "gzip, deflate, br",
"accept-language": "de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7",
"content-length": "0",
"content-type": "application/json; charset=utf-8",
"origin": "https://www.immobilienscout24.de",
"referer": "https://www.immobilienscout24.de/Suche/radius/wohnung-mieten?centerofsearchaddress=Berlin;10829;;;;&geocoordinates=52.4856;13.36495;100.0&enteredFrom=one_step_search",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36",
"x-requested-with": "XMLHttpRequest",
}
def get_immo24_search_pages(headers=HEADERS):
searchresults = []
pageexists = True
i = 1
while pageexists:
try:
r = requests.post(
f"https://www.immobilienscout24.de/Suche/de/berlin/berlin/wohnung-mieten?pagenumber={i}",
headers=headers,
)
rdict = r.json()
try:
searchresults.append(
rdict["searchResponseModel"]["resultlist.resultlist"][
"resultlistEntries"
][0]["resultlistEntry"]
)
except:
sys.stdout.write(str("All pages fetched...") + '\n')
pageexists = False
except:
sys.stdout.write(str("All pages fetched...") + '\n')
pageexists = False
i += 1
return searchresults
def get_listings_detailed_info(searchresults):
listing_results = []
    # Solution based on https://github.com/s0er3n/immobilienscout24-scraper/blob/master/immobilienscout24-scraper.py
for d in searchresults:
for x in d:
try:
home_id = x["@id"]
except:
home_id = "N/A"
try:
title = x["resultlist.realEstate"]["title"]
except:
title = "N/A"
try:
postcode = x["resultlist.realEstate"]["address"]["postcode"]
except:
postcode = "N/A"
try:
price = x["resultlist.realEstate"]["price"]["value"]
except:
price = "N/A"
try:
street = x["resultlist.realEstate"]["address"]["street"]
except:
street = "N/A"
try:
houseNumber = x["resultlist.realEstate"]["address"]["houseNumber"]
except:
houseNumber = "N/A"
try:
city = x["resultlist.realEstate"]["address"]["city"]
except:
city = "N/A"
try:
quarter = x["resultlist.realEstate"]["address"]["quarter"]
except:
quarter = "N/A"
try:
latitude = x["resultlist.realEstate"]["address"]["wgs84Coordinate"][
"latitude"
]
except:
latitude = "N/A"
try:
longitude = x["resultlist.realEstate"]["address"]["wgs84Coordinate"][
"longitude"
]
except:
longitude = "N/A"
try:
livingSpace = x["resultlist.realEstate"]["livingSpace"]
except:
livingSpace = "N/A"
try:
numberOfRooms = x["resultlist.realEstate"]["numberOfRooms"]
except:
numberOfRooms = "N/A"
try:
balcony = x["resultlist.realEstate"]["balcony"]
except:
balcony = "N/A"
try:
garden = x["resultlist.realEstate"]["garden"]
except:
garden = "N/A"
try:
monthlyRate = x["resultlist.realEstate"]["monthlyRate"]
except:
monthlyRate = "N/A"
try:
builtInKitchen = x["resultlist.realEstate"]["builtInKitchen"]
except:
builtInKitchen = "N/A"
listing_results.append(
(
home_id,
title,
postcode,
price,
street,
houseNumber,
city,
quarter,
latitude,
longitude,
livingSpace,
numberOfRooms,
balcony,
garden,
monthlyRate,
builtInKitchen,
f"https://www.immobilienscout24.de/expose/{home_id}",
)
)
return listing_results
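# Usage sketch (requires network access to immobilienscout24.de):
#   pages = get_immo24_search_pages()
#   listings = get_listings_detailed_info(pages)
#   df = get_df_with_columns(listings)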
def get_df_with_columns(listing_results):
    df_listings = pd.DataFrame(listing_results)
#!/usr/bin/env python
# coding: utf-8
# # CONTENT
# 1. [Introduction](#1)
# 2. [Load and Check Data](#2)
# 3. [Outlier Detection](#3)
# 4. [Fill Missing Value](#4)
# 5. [Data Visualization](#5)
# 6. [Machine Learning Algorithms](#6)
# 7. [Results](#7)
#
#
#
#
# <a id="1"> </a>
# ## INTRODUCTION
#
# * Chronic kidney disease (CKD) is an important public health problem worldwide, especially for underdeveloped countries. Chronic kidney disease means that the kidney is not working as expected and cannot filter blood properly. Approximately 10% of the world's population suffers from this disease and millions die every year. Recently, the number of patients who have reached renal insufficiency is increasing, which necessitates kidney transplant or dialysis. CKD does not show any symptoms in its early stages. The only way to find out if the patient has kidney disease is by testing. Early detection of CKD in its early stages can help the patient receive effective treatment.
# * The aim of this study is to apply 6 different machine learning methods and compare their accuracy values.
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import StandardScaler
from sklearn.impute import KNNImputer
from sklearn import metrics
from sklearn.metrics import roc_curve, auc, confusion_matrix, classification_report,accuracy_score, f1_score, precision_score, recall_score, roc_auc_score
from imblearn.metrics import sensitivity_score
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_classification
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
import plotly.graph_objs as go
from sklearn.model_selection import GridSearchCV , KFold , cross_val_score, ShuffleSplit, cross_validate
from collections import Counter
plt.style.use("seaborn-muted")
# Input data files are available in the read-only "../../../input/mansoordaku_ckdisease/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import warnings
# ignore warnings
warnings.filterwarnings("ignore")
from subprocess import check_output
print(check_output(["ls", "../../../input/mansoordaku_ckdisease"]).decode("utf8"))
import os
for dirname, _, filenames in os.walk("../../../input/mansoordaku_ckdisease"):
for filename in filenames:
print(os.path.join(dirname, filename))
# <a id="2"> </a>
# ## LOAD AND CHECK DATA
# In[ ]:
#upload dataset
df = pd.read_csv("../../../input/mansoordaku_ckdisease/kidney_disease.csv")
# In[ ]:
#info about dataset
df.info()
# In[ ]:
#first ten rows of the dataset
df.head(10)
# In[ ]:
#drop id column
df.drop(["id"],axis=1,inplace=True)
# In[ ]:
#convert to numeric data type
df.pcv = pd.to_numeric(df.pcv, errors='coerce')
df.wc = pd.to_numeric(df.wc, errors='coerce')
df.rc = pd.to_numeric(df.rc, errors='coerce')
# In[ ]:
#statistical information of the features used in the data set
df.describe()
# In[ ]:
#correlation between the features used in the data set
df.corr()
# In[ ]:
#correlation map
f,ax = plt.subplots(figsize=(12, 12))
print()
print()
# <a id="3"> </a>
# ## OUTLIER DETECTION
# In[ ]:
#detect outliers
def detect_outliers(df,features):
outlier_indices=[]
for c in features:
Q1=np.percentile(df[c],25) #1st quartile
Q3=np.percentile(df[c],75) #3rd quartile
IQR=Q3-Q1 #IQR
outlier_step=IQR*1.5 #Outlier step
outlier_list_col=df[(df[c]<Q1-outlier_step) | (df[c]>Q3 + outlier_step)].index #Detect outlier and their indeces
outlier_indices.extend(outlier_list_col) #Store indeces
outlier_indices = Counter(outlier_indices)
multiple_outliers=list(i for i,v in outlier_indices.items() if v>2)
return multiple_outliers
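# Illustration of the IQR rule above (made-up numbers): for feature values
# [1, 2, 2, 3, 3, 4, 50] -> Q1 = 2, Q3 = 3.5, IQR = 1.5, outlier_step = 2.25,
# so anything outside [-0.25, 5.75] (here 50) is flagged; a row is returned
# only if it is an outlier in more than two of the listed features.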
# In[ ]:
#check if I have outliers
df.loc[detect_outliers(df,["age","bp","sg","al","bgr","bu","sc","sod","pot","hemo","pcv","wc","rc"])]
# <a id="4"> </a>
# ## FILL MISSING VALUE
# In[ ]:
#number of missing values in features
df.isnull().sum()
# In[ ]:
#another way to show missing data
print()
#plt.grid()
#plt.title("Number of Missing Values")
# In[ ]:
#show missing data
import missingno as msno
msno.matrix(df)
print()
# In[ ]:
#show missing data
msno.bar(df)
print()
# In[ ]:
#how missing data in age
df[df["age"].isnull()]
# In[ ]:
#fill missing data with mean value
df["bgr"]= df["bgr"].fillna(np.mean(df["bgr"]))
df["bu"]= df["bu"].fillna(np.mean(df["bu"]))
df["sc"]= df["sc"].fillna(np.mean(df["sc"]))
df["sod"]= df["sod"].fillna(np.mean(df["sod"]))
df["pot"]= df["pot"].fillna(np.mean(df["pot"]))
df["hemo"]= df["hemo"].fillna(np.mean(df["hemo"]))
df["pcv"]= df["pcv"].fillna(np.mean(df["pcv"]))
df["wc"]= df["wc"].fillna(np.mean(df["wc"]))
df["rc"]= df["rc"].fillna(np.mean(df["rc"]))
# In[ ]:
#The number "1" is indicated by "ckd" (the condition of kidney disease) and the number
#"0" is indicated by "notckd" (the state of the absence of kidney disease).
df["classification"] = [1 if i == "ckd" else 0 for i in df["classification"]]
# <a id="5"> </a>
# ## DATA VISUALIZATION
# In[ ]:
sns.countplot(df.classification)
plt.xlabel('Chronic Kidney Disease')
plt.title("Classification",fontsize=15)
print()
# In[ ]:
print()
#print()
# In[ ]:
#blood pressure-frequency graph
sns.factorplot(data=df, x='bp', kind= 'count',size=6,aspect=2)
# In[ ]:
#density-frequency graph
sns.factorplot(data=df, x='sg', kind= 'count',size=6,aspect=2)
# In[ ]:
#albumin-frequency graph
sns.factorplot(data=df, x='al', kind= 'count',size=6,aspect=2)
# In[ ]:
#sugar-frequency graph
sns.factorplot(data=df, x='su', kind= 'count',size=6,aspect=2)
# In[ ]:
df['dm'] = df['dm'].replace(to_replace={'\tno':'no','\tyes':'yes',' yes':'yes'})
df['cad'] = df['cad'].replace(to_replace='\tno',value='no')
# In[ ]:
#Check the bar graph of categorical data using factorplot
sns.factorplot(data=df, x='rbc', kind= 'count',size=4,aspect=2)
sns.factorplot(data=df, x='pc', kind= 'count',size=4,aspect=2)
sns.factorplot(data=df, x='pcc', kind= 'count',size=4,aspect=2)
sns.factorplot(data=df, x='ba', kind= 'count',size=4,aspect=2)
sns.factorplot(data=df, x='pcv', kind= 'count',size=6,aspect=2)
sns.factorplot(data=df, x='wc', kind= 'count',size=10,aspect=2)
sns.factorplot(data=df, x='rc', kind= 'count',size=6,aspect=2)
sns.factorplot(data=df, x='htn', kind= 'count',size=4,aspect=2)
sns.factorplot(data=df, x='dm', kind= 'count',size=4,aspect=2)
sns.factorplot(data=df, x='cad', kind= 'count',size=4,aspect=2)
sns.factorplot(data=df, x='appet', kind= 'count',size=4,aspect=2)
sns.factorplot(data=df, x='pe', kind= 'count',size=4,aspect=2)
sns.factorplot(data=df, x='ane', kind= 'count',size=4,aspect=2)
# In[ ]:
def hist_plot(variable):
plt.figure(figsize=(9,3))
plt.hist(df[variable],bins=50)
plt.xlabel(variable)
plt.ylabel("Frequency")
plt.title("Age Distribution with Histogram")
print()
# In[ ]:
numericVar = ["age"]
for n in numericVar:
hist_plot(n)
# In[ ]:
plt.figure(figsize=(70,25))
plt.legend(loc='upper left')
g = sns.countplot(data = df, x = 'age', hue = 'classification')
g.legend(title = 'Kidney Disease', loc='upper left', bbox_to_anchor=(0.1, 0.5), ncol=1)
g.tick_params(labelsize=20)
plt.setp(g.get_legend().get_texts(), fontsize='32')
plt.setp(g.get_legend().get_title(), fontsize='42')
g.axes.set_title('Graph of the number of patients with chronic kidney disease by age',fontsize=50)
g.set_xlabel('Age',fontsize=40)
g.set_ylabel("Count",fontsize=40)
# In[ ]:
g = sns.FacetGrid(df,col="classification")
g.map(sns.distplot,"age", bins=25)
print()
# In[ ]:
sns.factorplot(x="classification",y="age",data=df,kind="box")
print()
# In[ ]:
age_corr = ['age', 'classification']
age_corr1 = df[age_corr]
age_corr_y = age_corr1[age_corr1['classification'] == 1].groupby(['age']).size().reset_index(name = 'count')
age_corr_y.corr()
# In[ ]:
sns.regplot(data = age_corr_y, x = 'age', y = 'count').set_title("Correlation graph for Age vs chronic kidney disease patient")
# In[ ]:
age_corr_n = age_corr1[age_corr1['classification'] == 0].groupby(['age']).size().reset_index(name = 'count')
age_corr_n.corr()
# In[ ]:
sns.regplot(data = age_corr_n, x = 'age', y = 'count').set_title("Correlation graph for Age vs healthy patient")
# In[ ]:
df2 = df.loc[:,["bp","bgr","sod","pot","pcv"]]
df2.plot()
# In[ ]:
df2.plot(subplots = True)
print()
# In[ ]:
g = sns.jointplot("age", "classification", data=df, size=7,ratio=3, color="r")
# In[ ]:
g = sns.jointplot(df.age, df.classification, kind="kde", size=7)
#pearsonr shows the correlation between two features, 1 if positive , -1 if negative, 0 if no correlation.
# In[ ]:
pal = sns.cubehelix_palette(2, rot=-.5, dark=.3)
sns.violinplot(data=df2, palette=pal, inner="points")
print()
# In[ ]:
sns.boxplot(x="sg", y="age", hue="classification",data=df, palette="PRGn")
print()
# In[ ]:
g = sns.FacetGrid(df,col="classification",row="sg")
g.map(plt.hist,"age", bins=25)
g.add_legend()
print()
# In[ ]:
#I assigned the value 0 and 1 to the nominal features
df['rbc'] = df.rbc.replace(['normal','abnormal'], ['1', '0'])
df['pc'] = df.pc.replace(['normal','abnormal'], ['1', '0'])
df['pcc'] = df.pcc.replace(['present','notpresent'], ['1', '0'])
df['ba'] = df.ba.replace(['present','notpresent'], ['1', '0'])
df['htn'] = df.htn.replace(['yes','no'], ['1', '0'])
df['dm'] = df.dm.replace(['yes','no'], ['1', '0'])
df['cad'] = df.cad.replace(['yes','no'], ['1', '0'])
df['appet'] = df.appet.replace(['good','poor'], ['1', '0'])
df['pe'] = df.pe.replace(['yes','no'], ['1', '0'])
df['ane'] = df.ane.replace(['yes','no'], ['1', '0'])
df.head()
# In[ ]:
#then I converted them to numeric data type
df.rbc = pd.to_numeric(df.rbc, errors='coerce')
df.pc = pd.to_numeric(df.pc, errors='coerce')
df.pcc = pd.to_numeric(df.pcc, errors='coerce')
df.ba = pd.to_numeric(df.ba, errors='coerce')
df.htn = pd.to_numeric(df.htn, errors='coerce')
df.dm = pd.to_numeric(df.dm, errors='coerce')
import pandas as pd
from collections import namedtuple
from xbbg.io import logs, param
from xbbg.core import timezone
Futures = dict(
Jan='F', Feb='G', Mar='H', Apr='J', May='K', Jun='M',
Jul='N', Aug='Q', Sep='U', Oct='V', Nov='X', Dec='Z',
)
CurrencyPair = namedtuple('CurrencyPair', ['ticker', 'factor', 'power'])
ValidSessions = ['allday', 'day', 'am', 'pm', 'night', 'pre', 'post']
def exch_info(ticker: str) -> pd.Series:
"""
Exchange info for given ticker
Args:
ticker: ticker or exchange
Returns:
pd.Series
Examples:
>>> exch_info('SPY US Equity')
tz America/New_York
allday [04:00, 20:00]
day [09:30, 16:00]
pre [04:00, 09:30]
post [16:01, 20:00]
dtype: object
>>> exch_info('ES1 Index')
tz America/New_York
allday [18:00, 17:00]
day [08:00, 17:00]
dtype: object
>>> exch_info('Z 1 Index')
tz Europe/London
allday [01:00, 21:00]
day [01:00, 21:00]
dtype: object
>>> exch_info('TESTTICKER Corp').empty
True
>>> exch_info('US')
tz America/New_York
allday [04:00, 20:00]
day [09:30, 16:00]
pre [04:00, 09:30]
post [16:01, 20:00]
dtype: object
"""
logger = logs.get_logger(exch_info, level='debug')
if ' ' not in ticker.strip():
ticker = f'XYZ {ticker.strip()} Equity'
info = param.load_info(cat='exch').get(
market_info(ticker=ticker).get('exch', ''), dict()
)
if ('allday' in info) and ('day' not in info):
info['day'] = info['allday']
if any(req not in info for req in ['tz', 'allday', 'day']):
logger.error(f'required exchange info cannot be found in {ticker} ...')
return pd.Series()
for ss in ValidSessions:
if ss not in info: continue
info[ss] = [param.to_hour(num=s) for s in info[ss]]
    return pd.Series(info)
import pandas as pd
import networkx as nx
import warnings
import seaborn as sns
import numpy as np
import matplotlib.patches as mpatches
import microbe_directory as md
from capalyzer.packet_parser import DataTableFactory, NCBITaxaTree, annotate_taxa, TaxaTree
from capalyzer.packet_parser.data_utils import group_small_cols
from capalyzer.packet_parser.diversity_metrics import (
shannon_entropy, richness, chao1, rarefaction_analysis
)
from sklearn.decomposition import PCA
from scipy.cluster.hierarchy import linkage, cophenet, leaves_list
from scipy.spatial.distance import squareform, pdist, jensenshannon
from os.path import join
from metasub_utils.packet_parse import MetaSUBTableFactory
from capalyzer.packet_parser.experimental import umap
from capalyzer.packet_parser.data_utils import group_small_cols
from capalyzer.packet_parser.normalize import proportions, prevalence
from plotnine import *
from scipy.cluster.hierarchy import fcluster
from matplotlib import pyplot as plt
from capalyzer.constants import MICROBE_DIR
from .figs_data import MetaSUBFiguresData
class MetaSUBFigures(MetaSUBFiguresData):
def tbl1(self):
"""Return a pandas dataframe listing where and when samples were collected."""
tbl = self.meta.copy()
tbl = tbl.loc[tbl['control_type'].isna()]
tbl = tbl.loc[~tbl['city'].isna()]
tbl = tbl.query('city != "other"')
tbl = pd.crosstab(tbl['city'], tbl['project'])
tbl['Region'] = self.meta.groupby('city').apply(lambda x: x['continent'].iloc[0])
tbl['Region'] = tbl['Region'].str.replace('_', ' ').str.title()
tbl.index = tbl.index.str.replace('_', ' ').str.title()
tbl = tbl.set_index('Region', append=True)
tbl = tbl.reorder_levels(['Region', 'city'])
tbl = tbl.sort_index()
other_projs = list(tbl.columns[tbl.sum(axis=0) < 100]) + ['PATHOMAP_WINTER']
tbl['Other'] = tbl[other_projs].sum(axis=1)
tbl = tbl.drop(columns=other_projs)
tbl['Total'] = tbl.sum(axis=1)
tbl = tbl[['PILOT', 'CSD16', 'CSD17', 'Other', 'Total']] # column order
continent_totals = tbl.groupby(level=0).sum()
continent_totals['city'] = 'AAA Region Total' # AAA so sort puts these first
continent_totals = continent_totals.set_index('city', append=True)
tbl = pd.concat([tbl, continent_totals]).sort_index()
ctrl = self.meta.copy()
ctrl = ctrl.loc[~ctrl['control_type'].isna()]
ctrl = pd.crosstab(ctrl['control_type'], ctrl['project'])
ctrl.index.names = ['city']
ctrl['Region'] = 'Control'
ctrl = ctrl.set_index('Region', append=True)
ctrl = ctrl.reorder_levels(['Region', 'city'])
other_projs = ctrl.columns[ctrl.sum(axis=0) < 10]
ctrl['Other'] = ctrl[other_projs].sum(axis=1)
ctrl = ctrl.drop(columns=other_projs)
ctrl['Total'] = ctrl.sum(axis=1)
cols = [
col for col in ['PILOT', 'CSD16', 'CSD17', 'Other', 'Total']
if col in ctrl.columns
]
ctrl = ctrl[cols]
tbl = pd.concat([ctrl, tbl])
tbl.index = tbl.index.set_levels(tbl.index.levels[1].str.replace('AAA', ''), level=1)
return tbl
def fig1(self, N=75):
"""Figure showing the major taxa found in the metasub data."""
return [
self.fig1_core_taxa_tree(),
self.fig1_prevalence_curve(),
self.fig1_major_taxa_curves(N=N),
self.fig1_species_rarefaction(),
self.fig1_reference_comparisons(),
self.fig1_fraction_unclassified(),
]
def fig1_core_taxa_tree(self):
"""Return an ETE tree showing core taxa with annotations."""
def fig1_prevalence_curve(self):
"""Return a P9 figure showing the distribution of species prevalences."""
prev = pd.DataFrame({
'total': prevalence(self.wide_taxa),
'city': self.wide_taxa.groupby(by=self.meta['city']).apply(prevalence).mean(axis=0),
})
prev['taxa'] = prev.index
prev_flat = prev.melt(id_vars='taxa')
plot = (
ggplot(prev_flat, aes(x='value', color='variable', fill='variable')) +
geom_density(size=2, alpha=0.2) +
theme_minimal() +
xlab('Species Prevalence') +
ylab('Density') +
geom_vline(xintercept=0.25, color='black') +
geom_vline(xintercept=0.70, color='black') +
geom_vline(xintercept=0.95, color='black') +
annotate(geom='label', x=0.65, y=2.9, label="Sub-Core 70-95% (1,084)", size=20) +
annotate(geom='label', x=0.33, y=3.5, label="Peripheral, < 25% (2,466)", size=20) +
annotate(geom='label', x=0.78, y=3.2, label="Core > 95% (61)", size=20) +
scale_color_brewer(type='qualitative', palette=6, direction=1) +
scale_fill_brewer(type='qualitative', palette=6, direction=1) +
theme(
text=element_text(size=20),
axis_text_x=element_text(angle=0, hjust=1),
figure_size=(8, 8),
legend_position='none',
)
)
return plot
def fig1_major_taxa_curves(self, N=75):
"""Return two P9 panels showing prevalence and abundance distributions of major taxa."""
taxa = self.wide_taxa_rel
city = taxa.groupby(by=self.meta['city']).median()
top_taxa = taxa.mean().sort_values(ascending=False)[:N].index
taxa, city = 1000 * 1000 * taxa[top_taxa], 1000 * 1000 * city[top_taxa]
taxa_prev, city_prev = prevalence(taxa), prevalence(city)
        taxa_prev = pd.DataFrame({'taxon': taxa_prev.index, 'prevalence': taxa_prev, 'names': taxa_prev.index})
# AUTOGENERATED! DO NOT EDIT! File to edit: 02_vulnerabilidad_pca.ipynb (unless otherwise specified).
__all__ = ['get_pca_geodf', 'scale_vars', 'get_pca_columns', 'ajustar_pca', 'cargar_indices_vulnerabilidad']
# Cell
import pandas as pd
import geopandas as gpd
import numpy as np
import datetime
from .datos import *
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn import preprocessing
# visualization libraries
import seaborn as sns
import matplotlib.pyplot as plt
# file-system and search libraries
import os
import glob
import csv
# Cell
def get_pca_geodf(data, numeric_vars, n_components=3):
x = scale_vars(data, numeric_vars)
pca = PCA(n_components=n_components)
principalComponents = pca.fit_transform(x)
columns = get_pca_columns(pca)
principalDataframe = pd.DataFrame(data = principalComponents, columns = columns)
newDataframe = pd.concat([data, principalDataframe], axis = 1)
return pca, newDataframe
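# Usage sketch (illustrative; assumes `municipios_df` holds the municipal
# indicators loaded elsewhere in this module):
#   pca, df_pca = get_pca_geodf(municipios_df, ['densi', 'p_mayores'], n_components=2)
#   pca.explained_variance_ratio_    # share of variance captured by PC1 and PC2
#   df_pca[['PC1', 'PC2']]           # the appended principal-component columns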
# Cell
def scale_vars(data, numeric_vars):
x = data[numeric_vars]
x = StandardScaler().fit_transform(x)
x = pd.DataFrame(x, columns=numeric_vars)
return x
# Cell
def get_pca_columns(pca):
columns = []
for i in range(pca.n_components):
columns.append(f'PC{i + 1}')
return columns
# Cell
def ajustar_pca(municipios_df, caracteristicas, version=0):
indicadores = ['densi', 'p_mayores', 'p_niños_m', 'p_sin_derech',
'num_hosp_pp', 'num_camas_pp', 'tasa_diabetes', 'tasa_cardiacas',
'tasa_cancer', 'tasa_pulmonares']
    raise NotImplementedError('Under construction')
# Cell
def cargar_indices_vulnerabilidad(nombre_archivo, formato_largo=False):
indicadores_municipal = leer_variables_municipales_std()
    df = pd.read_csv(nombre_archivo, dtype={'CLAVE_MUNICIPIO_RES': str})
# coding: utf-8
# http://stackoverflow.com/questions/27889873/clustering-text-documents-using-scikit-learn-kmeans-in-python
# http://brandonrose.org/clustering
import os
import re
import sys
import frontmatter
import matplotlib.pyplot as plt
import pandas as pd
import pytoml as toml
import mpld3
import numpy as np
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import SnowballStemmer
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.grid_search import GridSearchCV
from sklearn.manifold import MDS
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.pipeline import Pipeline
reload(sys) # to re-enable sys.setdefaultencoding()
sys.setdefaultencoding('utf-8')
stop = stopwords.words('spanish')
stopE = stopwords.words('english')
stop = stop + stopE + ['com', 'más', 'si', 'está', 'puede', 'ejemplo', 'usar',
'aplicación', 'siguiente', 'cada', 'ser', 'vez',
'hacer', 'podemos' 'cómo', 'forma', 'así', 'asi', 'dos',
'tipo', 'nombre', 'ahora', 'también', 'solo', 'ver',
'qué', 'pueden', 'hace', 'tener', 'número', 'valor',
'artículo', 'parte', '»»', 'c', 'vamos', 'uso', 'debe',
'página', 'todas', 'decir', 'están', 'puedes', 'dentro',
'ello', 'blog', 'realizar', 'lugar', 'además', 'aquí',
'etc', 'aunque', 'nuevo', 'último', 'será', 'tema',
'bien', 'sólo', 'solo', 'hecho', 'cosas', 'poder',
'simplemente', 'simple', 'artículos', 'va', 'debemos',
'debería', 'hoy', 'algún', '–', 'sido', 'sí', 'éste',
'varios', 'aún', 'x', 'tan', 'podría', 'seguir', 'día',
'tres', 'cuatro', 'cinco', 'voy', 'ir', 'tal',
'mientras', 'saber', 'existe', 'sería', 'pasar',
'pueda', '¿qué', 'dejo', 'él', '»', 'ir', 'trabajar',
'Éste', 'n', 'mas', 'serán', 'ejempl', 'algun',
'aplicacion', 'aplic', 'bas', 'cas', 'cre', 'llam',
'numer', 'pod', 'referent', 'pas', 'tambi', u'ultim',
u'unic', u'usa', u'usand', u'usuari', u'utiliz',
u'variabl', u'version', u'visit', u'vist', u'web',
u'\xbb\xbb', 'import', 'podr', 'util', 'gran', 'siti',
'sol', 'solucion', 'aquell', 'pued', 'inform', 'deb',
'archiv', 'sistem', 'mism', 'permit', 'articul', 'ea',
'f', 'fc', 'non', 'bd', 'nuev', 'pdf', 'gui', 'notici',
'debi', 'mejor', 'misc', 'use', 'websit']
stop = set(stop)
def readPosts(path, english=False):
"""Read posts in path and return a pandas Data frame"""
    df = pd.DataFrame()
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2021/10/14 12:19
Desc: cninfo (巨潮资讯) - Data Center - Thematic Statistics - Bond Reports - Bond Issuance
http://webapi.cninfo.com.cn/#/thematicStatistics
"""
import time
import pandas as pd
import requests
from py_mini_racer import py_mini_racer
js_str = """
function mcode(input) {
var keyStr = "<KEY> <KEY>;
var output = "";
var chr1, chr2, chr3 = "";
var enc1, enc2, enc3, enc4 = "";
var i = 0;
do {
chr1 = input.charCodeAt(i++);
chr2 = input.charCodeAt(i++);
chr3 = input.charCodeAt(i++);
enc1 = chr1 >> 2;
enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);
enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);
enc4 = chr3 & 63;
if (isNaN(chr2)) {
enc3 = enc4 = 64;
} else if (isNaN(chr3)) {
enc4 = 64;
}
output = output + keyStr.charAt(enc1) + keyStr.charAt(enc2)
+ keyStr.charAt(enc3) + keyStr.charAt(enc4);
chr1 = chr2 = chr3 = "";
enc1 = enc2 = enc3 = enc4 = "";
} while (i < input.length);
return output;
}
"""
def bond_treasure_issue_cninfo(
start_date: str = "20210910", end_date: str = "20211109"
) -> pd.DataFrame:
"""
    cninfo (巨潮资讯) - Data Center - Thematic Statistics - Bond Reports - Bond Issuance - Treasury Bond Issuance
    http://webapi.cninfo.com.cn/#/thematicStatistics
    :param start_date: start date of the statistics window
    :type start_date: str
    :param end_date: end date of the statistics window
    :type end_date: str
    :return: treasury bond issuance data
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1120"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.rename(
columns={
"F009D": "缴款日",
"SECNAME": "债券简称",
"DECLAREDATE": "公告日期",
"F004D": "发行起始日",
"F003D": "发行终止日",
"F008N": "单位面值",
"SECCODE": "债券代码",
"F007N": "发行价格",
"F006N": "计划发行总量",
"F005N": "实际发行总量",
"F028N": "增发次数",
"BONDNAME": "债券名称",
"F014V": "发行对象",
"F002V": "交易市场",
"F013V": "发行方式",
},
inplace=True,
)
temp_df = temp_df[
[
"债券代码",
"债券简称",
"发行起始日",
"发行终止日",
"计划发行总量",
"实际发行总量",
"发行价格",
"单位面值",
"缴款日",
"增发次数",
"交易市场",
"发行方式",
"发行对象",
"公告日期",
"债券名称",
]
]
temp_df["发行起始日"] = pd.to_datetime(temp_df["发行起始日"]).dt.date
temp_df["发行终止日"] = pd.to_datetime(temp_df["发行终止日"]).dt.date
temp_df["缴款日"] = pd.to_datetime(temp_df["缴款日"]).dt.date
temp_df["公告日期"] = pd.to_datetime(temp_df["公告日期"]).dt.date
temp_df["计划发行总量"] = pd.to_numeric(temp_df["计划发行总量"])
temp_df["实际发行总量"] = pd.to_numeric(temp_df["实际发行总量"])
temp_df["发行价格"] = pd.to_numeric(temp_df["发行价格"])
temp_df["单位面值"] = pd.to_numeric(temp_df["单位面值"])
temp_df["增发次数"] = pd.to_numeric(temp_df["增发次数"])
return temp_df
def bond_local_government_issue_cninfo(
start_date: str = "20210911", end_date: str = "20211110"
) -> pd.DataFrame:
"""
    cninfo (巨潮资讯) - Data Center - Thematic Statistics - Bond Reports - Bond Issuance - Local Government Bond Issuance
    http://webapi.cninfo.com.cn/#/thematicStatistics
    :param start_date: start date of the statistics window
    :type start_date: str
    :param end_date: end date of the statistics window
    :type end_date: str
    :return: local government bond issuance data
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1121"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.rename(
columns={
"F009D": "缴款日",
"SECNAME": "债券简称",
"DECLAREDATE": "公告日期",
"F004D": "发行起始日",
"F003D": "发行终止日",
"F008N": "单位面值",
"SECCODE": "债券代码",
"F007N": "发行价格",
"F006N": "计划发行总量",
"F005N": "实际发行总量",
"F028N": "增发次数",
"BONDNAME": "债券名称",
"F014V": "发行对象",
"F002V": "交易市场",
"F013V": "发行方式",
},
inplace=True,
)
temp_df = temp_df[
[
"债券代码",
"债券简称",
"发行起始日",
"发行终止日",
"计划发行总量",
"实际发行总量",
"发行价格",
"单位面值",
"缴款日",
"增发次数",
"交易市场",
"发行方式",
"发行对象",
"公告日期",
"债券名称",
]
]
temp_df["发行起始日"] = pd.to_datetime(temp_df["发行起始日"]).dt.date
temp_df["发行终止日"] = pd.to_datetime(temp_df["发行终止日"]).dt.date
temp_df["缴款日"] = pd.to_datetime(temp_df["缴款日"]).dt.date
temp_df["公告日期"] = pd.to_datetime(temp_df["公告日期"]).dt.date
temp_df["计划发行总量"] = pd.to_numeric(temp_df["计划发行总量"])
temp_df["实际发行总量"] = pd.to_numeric(temp_df["实际发行总量"])
temp_df["发行价格"] = pd.to_numeric(temp_df["发行价格"])
temp_df["单位面值"] = pd.to_numeric(temp_df["单位面值"])
temp_df["增发次数"] = pd.to_numeric(temp_df["增发次数"])
return temp_df
def bond_corporate_issue_cninfo(
start_date: str = "20210911", end_date: str = "20211110"
) -> pd.DataFrame:
"""
    cninfo (巨潮资讯) - Data Center - Thematic Statistics - Bond Reports - Bond Issuance - Corporate Bond Issuance
    http://webapi.cninfo.com.cn/#/thematicStatistics
    :param start_date: start date of the statistics window
    :type start_date: str
    :param end_date: end date of the statistics window
    :type end_date: str
    :return: corporate bond issuance data
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1122"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.rename(
columns={
"SECNAME": "债券简称",
"DECLAREDATE": "公告日期",
"F004D": "交易所网上发行终止日",
"F003D": "交易所网上发行起始日",
"F008N": "发行面值",
"SECCODE": "债券代码",
"F007N": "发行价格",
"F006N": "实际发行总量",
"F005N": "计划发行总量",
"F022N": "最小认购单位",
"F017V": "承销方式",
"F052N": "最低认购额",
"F015V": "发行范围",
"BONDNAME": "债券名称",
"F014V": "发行对象",
"F013V": "发行方式",
"F023V": "募资用途说明",
},
inplace=True,
)
temp_df = temp_df[
[
"债券代码",
"债券简称",
"公告日期",
"交易所网上发行起始日",
"交易所网上发行终止日",
"计划发行总量",
"实际发行总量",
"发行面值",
"发行价格",
"发行方式",
"发行对象",
"发行范围",
"承销方式",
"最小认购单位",
"募资用途说明",
"最低认购额",
"债券名称",
]
]
temp_df["公告日期"] = pd.to_datetime(temp_df["公告日期"]).dt.date
temp_df["交易所网上发行起始日"] = pd.to_datetime(temp_df["交易所网上发行起始日"]).dt.date
temp_df["交易所网上发行终止日"] = pd.to_datetime(temp_df["交易所网上发行终止日"]).dt.date
temp_df["计划发行总量"] = pd.to_numeric(temp_df["计划发行总量"])
temp_df["实际发行总量"] = pd.to_numeric(temp_df["实际发行总量"])
temp_df["发行面值"] = pd.to_numeric(temp_df["发行面值"])
temp_df["发行价格"] = pd.to_numeric(temp_df["发行价格"])
temp_df["最小认购单位"] = pd.to_numeric(temp_df["最小认购单位"])
temp_df["最低认购额"] = pd.to_numeric(temp_df["最低认购额"])
return temp_df
def bond_cov_issue_cninfo(
start_date: str = "20210913", end_date: str = "20211112"
) -> pd.DataFrame:
"""
    cninfo (巨潮资讯) - Data Center - Thematic Statistics - Bond Reports - Bond Issuance - Convertible Bond Issuance
    http://webapi.cninfo.com.cn/#/thematicStatistics
    :param start_date: start date of the statistics window
    :type start_date: str
    :param end_date: end date of the statistics window
    :type end_date: str
    :return: convertible bond issuance data
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1123"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.rename(
columns={
'F029D': '发行起始日',
'SECNAME': '债券简称',
'F027D': '转股开始日期',
'F003D': '发行终止日',
'F007N': '发行面值',
'F053D': '转股终止日期',
'F005N': '计划发行总量',
'F051D': '网上申购日期',
'F026N': '初始转股价格',
'F066N': '网上申购数量下限',
'F052N': '发行价格',
'BONDNAME': '债券名称',
'F014V': '发行对象',
'F002V': '交易市场',
'F032V': '网上申购简称',
'F086V': '转股代码',
'DECLAREDATE': '公告日期',
'F028D': '债权登记日',
'F004D': '优先申购日',
'F068D': '网上申购中签结果公告日及退款日',
'F054D': '优先申购缴款日',
'F008N': '网上申购数量上限',
'SECCODE': '债券代码',
'F006N': '实际发行总量',
'F067N': '网上申购单位',
'F065N': '配售价格',
'F017V': '承销方式',
'F015V': '发行范围',
'F013V': '发行方式',
'F021V': '募资用途说明',
'F031V': '网上申购代码'
},
inplace=True,
)
temp_df = temp_df[
[
'债券代码',
'债券简称',
'公告日期',
'发行起始日',
'发行终止日',
'计划发行总量',
'实际发行总量',
'发行面值',
'发行价格',
'发行方式',
'发行对象',
'发行范围',
'承销方式',
'募资用途说明',
'初始转股价格',
'转股开始日期',
'转股终止日期',
'网上申购日期',
'网上申购代码',
'网上申购简称',
'网上申购数量上限',
'网上申购数量下限',
'网上申购单位',
'网上申购中签结果公告日及退款日',
'优先申购日',
'配售价格',
'债权登记日',
'优先申购缴款日',
'转股代码',
'交易市场',
'债券名称',
]
]
temp_df["公告日期"] = pd.to_datetime(temp_df["公告日期"]).dt.date
temp_df["发行起始日"] = pd.to_datetime(temp_df["发行起始日"]).dt.date
temp_df["发行终止日"] = pd.to_datetime(temp_df["发行终止日"]).dt.date
temp_df["转股开始日期"] = pd.to_datetime(temp_df["转股开始日期"]).dt.date
temp_df["转股终止日期"] = pd.to_datetime(temp_df["转股终止日期"]).dt.date
temp_df["转股终止日期"] = pd.to_datetime(temp_df["转股终止日期"]).dt.date
temp_df["网上申购日期"] = pd.to_datetime(temp_df["网上申购日期"]).dt.date
temp_df["网上申购中签结果公告日及退款日"] = pd.to_datetime(temp_df["网上申购中签结果公告日及退款日"]).dt.date
temp_df["债权登记日"] = pd.to_datetime(temp_df["债权登记日"]).dt.date
temp_df["优先申购日"] = pd.to_date | time(temp_df["优先申购日"]) | pandas.to_datetime |
## Basketball Reference Game Log Scraping ####################################################################################
# Georgia Tech: Daily Fantasy Sports Project
# authors: <NAME> & <NAME>
#### Process Outline #########################################################################################################
# Import historical results
# Import archives (so entire process doesn't have to re-run)
# Filter game-logs to day before contest
# Run prediction/optimization for top 10 line-ups (and save).
# Find player results within game-logs and calculate total line-up score
# if a player has < 10 points (threshold TBD, possibly 5), add to the "players to remove" listing, re-run the optimization and re-save the top-10 line-ups
# Produce DF that stores each line-up, its result, entry cost, win/lose cash, percentile and rough estimate from percentile --> $ won
# Produce report on total $ won/lost, ROI
# (maybe run some cross-validation to see right # of line-ups to use nightly? see if we can start filtering the data for predictions from full season --> last x games and cross-validate?)
##############################################################################################################################
##### Questions ######
## TO DOs ##
# run on everyone
# test / confirm #
##### Notes ######
# complete run time: ~ 4 minutes per day of results
##############################################################################################################################
# Package Import #
import numpy as np
import pandas as pd
from time_analysis import analysis_timeSeries # to delete in init
from optimization import DFS_Optimization # to delete in init
from datetime import date, datetime
from dateutil import rrule
# Functions #
def import_hist_results(path):
dt = pd.read_csv(path)
dt.Date = pd.to_datetime(dt.Date)
return dt
def identify_new_dates(hist_dt, imported_final_dt):
result_dates = hist_dt.Date.dt.date.drop_duplicates().tolist()
analysis_dates = imported_final_dt.Date.dt.date.drop_duplicates().tolist()
filter_dates = list(set(result_dates) - set(analysis_dates))
filter_dates.sort()
filter_dates = [d.strftime('%Y-%m-%d') for d in filter_dates]
return filter_dates
def prep_historic_data_opt(fd_hist_plyr_results, filt_date):
fd_hist_plyr_results = fd_hist_plyr_results[(fd_hist_plyr_results.Date == filt_date)]
fd_hist_plyr_results = fd_hist_plyr_results[(fd_hist_plyr_results['FD Points'] >= 10)]
fd_hist_slrs = fd_hist_plyr_results[['Position','Player Name','Salary']].copy()
fd_hist_slrs = fd_hist_slrs.rename(columns={'Player Name': 'Nickname'}).reset_index(drop=True)
fd_hist_slrs = fd_hist_slrs.sort_values('Salary', ascending=False)
return fd_hist_plyr_results, fd_hist_slrs
def merge_optim_histPlayer(pred_df, fd_hist_results):
rslts_df = pd.merge(pred_df, fd_hist_results, left_on=['Date','Player'], right_on=['Date','Player Name'], how='inner')
rslts_df = rslts_df.groupby(['Optimization_No','Date','Predicted_Score'])['FD Points'].agg(FD_Points='sum',Player_Count='count').reset_index()
rslts_df = rslts_df[['Optimization_No','Date','Predicted_Score','FD_Points','Player_Count']]
rslts_df.Date = pd.to_datetime(rslts_df.Date)
return rslts_df
def merge_model_contest(rslts_df, hst_rslts):
rslts_df = pd.merge(rslts_df, hst_rslts, left_on=['Date'], right_on=['Date'], how='inner')
rslts_df['Cash'] = np.where(rslts_df['FD_Points'] > rslts_df['Min Cash Score'],'Y','N')
rslts_df['Percentile'] = (rslts_df['FD_Points'] - rslts_df['Min Cash Score']) / (rslts_df['1st Place Score'] - rslts_df['Min Cash Score'])
rslts_df = rslts_df[['Optimization_No','Date','Predicted_Score','Cost','Player_Count','FD_Points','Cash','Percentile']]
return rslts_df
def percentile_conversion(rslts_df, prcnt_conv_dict):
conversion_df = pd.DataFrame.from_dict(prcnt_conv_dict, orient='index').reset_index()
conversion_df.columns = ['Percentile','multiplier']
rslts_df = rslts_df.sort_values('Percentile')
conversion_df = conversion_df.sort_values('Percentile')
rslts_df = pd.merge_asof(rslts_df, conversion_df, on='Percentile', direction='nearest')
rslts_df.Cost = rslts_df.Cost.str.replace('$','')
rslts_df.Cost = rslts_df.Cost.astype(float)
rslts_df['Outcome'] = np.where(rslts_df.Cash == 'Y',rslts_df.Cost * rslts_df.multiplier, 0)
return rslts_df
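# Illustrative sketch (not part of the pipeline): how percentile_conversion maps a
# line-up's finishing percentile to a payout multiplier via pd.merge_asof. The results,
# entry costs and conversion table below are made up; costs are plain numeric strings here.
def _percentile_conversion_demo():
    demo_results = pd.DataFrame({
        'Percentile': [0.12, 0.48, 0.91],
        'Cash': ['Y', 'Y', 'N'],
        'Cost': ['5.00', '5.00', '5.00'],
    })
    demo_conversion = {.1: 1.7, .5: 3.9, .9: 827}
    # each row is matched to the nearest percentile key, then Outcome = Cost * multiplier when Cash == 'Y'
    return percentile_conversion(demo_results, demo_conversion)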
def roi(fnl_df):
print('ROI%: ' + str((((fnl_df.Outcome.sum() / fnl_df.Cost.sum()) - 1) * 100)))
print('Total $ Value: ' + str(fnl_df.Outcome.sum() - fnl_df.Cost.sum()))
# Variables / Hard Codes #
hist_results_path = 'fanDuel_results_data.csv'
final_df_path = 'final_df.csv'
number_of_lineups = 10
players_to_remove = []
percentile_conversion_data = {.05:1.7, .1:1.7, .15:1.7, .2:2, .25:2.1, .3:2.1, .35:2.1, .4:2.3, .45:3, .5:3.9, .55:4.9, .6:7.4, .65:9.2, .7:13.8, .75:27.7, .8:39.1, .85:189.5, .9:827, .95:1755.1}
# Execution #
print('Execution Started')
####### Execution #######
hist_results = import_hist_results(hist_results_path)
import_final_df = import_hist_results(final_df_path)
gameLog_dt = pd.read_csv('gameLog_dt.csv')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created 2022
@author: mminot
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import re
import pickle
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns
def add_metric_to_df(df, is_float, metric_str):
data = df.copy()
modified_metric = []
if not is_float:
for entry in data['output/final_test_' + metric_str]:
m = re.search(r'[\-]*0\.[0-9]{4}',entry)
try:
modified_metric.append(float(m.group(0)))
except:
if entry.startswith('nan'):
modified_metric.append(np.nan)
else:
modified_metric.append(np.nan)
data['best_' + metric_str] = modified_metric
else:
data['best_' + metric_str] = data['output/final_test_' + metric_str]
return data
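# Illustrative note (not from the original script): for a raw entry string such as
# "0.8312 (best epoch)" the regex above extracts 0.8312 into the new 'best_<metric>'
# column, while entries that start with "nan" or have no match become np.nan.
# The example strings are made up.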
def get_rho_by_variable(df,variable_str, metric_str):
df = df[['best_' + metric_str,variable_str,'seed']].copy()
s1,s2,s3, s4, s5 = df[df['seed'] == 1].copy(), df[df['seed'] == 2].copy(), df[df['seed'] == 3].copy(), df[df['seed'] == 4].copy(), df[df['seed'] == 5].copy()
variable_arr = df[variable_str].drop_duplicates().values
variable_arr.sort()
df = df.drop_duplicates([variable_str,'seed'],keep='first')
df = df.sort_values([variable_str])
for var in variable_arr:
if var not in s1[variable_str].values:
df = df.append({'best_' + metric_str: np.nan, variable_str: var, 'seed': 1}, ignore_index=True)
if var not in s2[variable_str].values:
df = df.append({'best_' + metric_str: np.nan, variable_str: var, 'seed': 2}, ignore_index=True)
if var not in s3[variable_str].values:
df = df.append({'best_' + metric_str: np.nan, variable_str: var, 'seed': 3}, ignore_index=True)
if var not in s4[variable_str].values:
df = df.append({'best_' + metric_str: np.nan, variable_str: var, 'seed': 4}, ignore_index=True)
if var not in s5[variable_str].values:
df = df.append({'best_' + metric_str: np.nan, variable_str: var, 'seed': 5}, ignore_index=True)
df = df.sort_values(by=[variable_str])
test_rho = [list (x) for x in (zip(df[df['seed'] == 1]['best_' + metric_str],
df[df['seed'] == 2]['best_' + metric_str],
df[df['seed'] == 3]['best_' + metric_str],
df[df['seed'] == 4]['best_' + metric_str],
df[df['seed'] == 5]['best_' + metric_str],
))]
return test_rho
data_type = 'her2'
path = f'../results/'
cnn = 'her2_cnn.csv'
transformer = 'her2_transformer.csv'
model_list = [cnn, transformer]
model_str_list = ['cnn', 'transformer']
for model, model_str in zip(model_list, model_str_list):
data = pd.read_csv(path + model)
data['style_col'] = 'NT Augmented'
#Parse Metric from DataFrame
data = add_metric_to_df(data, is_float = False, metric_str = 'mcc')
data = data.rename(columns = {'truncate_factor': 'training pos/neg ratio'})
data_2 = data[data['aug_factor'] == '2']
data_2_rename = data_2.copy()
#rename dna aug fraction 1 to DNA
data_2_rename['aug_factor'] = '2'
data_2_rename['style_col'] = '2'
#convert aug_factor type from str to int
data_5 = data[data['aug_factor'] == '5']
data_5_rename = data_5.copy()
#rename dna aug fraction 1 to DNA
data_5_rename['aug_factor'] = '5'
data_5_rename['style_col'] = '5'
data_10 = data[data['aug_factor'] == '10']
data_10_rename = data_10.copy()
#rename dna aug fraction 1 to DNA
data_10_rename['aug_factor'] = '10'
data_10_rename['style_col'] = '10'
data = data.append(data_2_rename, ignore_index = True)
data = pd.merge(data, data_2, how='outer', indicator=True)
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 31 19:28:58 2020
@author: hcb
"""
import pandas as pd
import numpy as np
import lightgbm as lgb
import os
from tqdm import tqdm
from sklearn.model_selection import KFold
from sklearn.metrics import f1_score
from config import config
import warnings
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
import geohash
warnings.filterwarnings("ignore")
trn_path = config.train_dir
test_path = config.test_dir
def mode_mean(x):
return x.mode().mean()
def get_data(path):
df_list = []
for file in tqdm(sorted(os.listdir(path))):
file_path = os.path.join(path, file)
df = pd.read_csv(file_path)
df['time_id'] = list(range(len(df)))
df_list.append(df)
df = pd.concat(df_list)
return df
def get_latlng(df, precision=7):
tmp_df = pd.DataFrame()
tmp_df['lng'] = df['lon']
tmp_df['lat'] = df['lat']
tmp_df['code'] = tmp_df[[
'lng', 'lat'
]].apply(lambda x: geohash.encode(x['lat'], x['lng'],
precision=precision),
axis=1)
code = tmp_df['code'].values
return code
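# Hedged usage sketch (not part of the pipeline): get_latlng geohash-encodes each
# lat/lon row. The coordinates below are the common geohash test point; at precision=7
# it should encode to a 7-character cell id such as 'u4pruyd' (quoted from memory,
# verify against your geohash build before relying on it).
def _geohash_demo():
    demo_point = pd.DataFrame({'lat': [57.64911], 'lon': [10.40744]})
    return get_latlng(demo_point, precision=7)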
def transform_day(df):
df['day'] = df['time'].apply(lambda x: int(x[0:4]))
df['month'] = df['time'].apply(lambda x: int(x[0:2]))
df['hour'] = df['time'].apply(lambda x: int(x[5:7]))
df['minute'] = df['time'].apply(lambda x: int(x[8:10]))
df['seconds'] = df['time'].apply(lambda x: int(x[11:13]))
df['time_transform'] = (df['month'] * 31 + df['day']) * 24 + df[
'hour'
] + df['minute'] / 60 + df['seconds'] / 3600
return df
def get_feature(df2, train):
df = df2.copy()
df['new_id'] = (df['渔船ID'] + 1) * 10000 + df['time_id']
tmp_df = df[['渔船ID', 'lat', 'lon', 'time_transform', 'new_id']].copy()
tmp_df.columns = ['渔船ID', 'x_1', 'y_1', 'time_transform_1', 'new_id']
tmp_df['new_id'] = tmp_df['new_id'] + 1
df = df.merge(tmp_df, on=['渔船ID', 'new_id'], how='left')
df['dis_path'] = np.sqrt((df['x_1'] - df['lat']) ** 2 +
(df['y_1'] - df['lon']) ** 2)
df['slope'] = np.abs((df['y_1'] - df['lon']) /
(df['x_1'] - df['lat'] + 0.001))
df.dropna(inplace=True)
tmp_df = df.groupby('渔船ID')['dis_path'].agg({
'max', 'median', 'mean', 'sum'
}).reset_index()
tmp_df.columns = ['渔船ID', 'dis_path_max', 'dis_path_median',
'dis_path_mean', 'dis_path_sum']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df.groupby('渔船ID')['slope'].agg({
'max', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', 'slope_max', 'slope_median', 'slope_mean1']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df[df['速度'] > 0]
tmp_df = tmp_df.groupby('渔船ID')['dis_path'].agg({
'min', 'std', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', 'dis_path_min2', 'dis_path_std2',
'dis_path_median2', 'dis_path_mean']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df.groupby('渔船ID')['slope'].agg({
'min', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', 'slope_min', 'slope_median2', 'slope_mean2']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df[df['速度'] > 0]
tmp_df = tmp_df.groupby('渔船ID')['slope'].agg({
'min', 'std', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', 'slope_min3', 'slope_std3', 'slope_median3',
'slope_mean3']
train = train.merge(tmp_df, on='渔船ID', how='left')
df['time_delt'] = np.abs(df['time_transform_1'] - df['time_transform'])
df['dis/time'] = df['dis_path'] / df['time_delt']
tmp_df = df.groupby('渔船ID')['dis/time'].agg({
'mean', 'median'
}).reset_index()
tmp_df.columns = ['渔船ID', 'dis/time_mean', 'dis/time_median']
train = train.merge(tmp_df, on='渔船ID', how='left')
return train
def get_feature2(df2, train):
df = df2.copy()
df['new_id'] = (df['渔船ID'] + 1) * 10000 + df['time_id']
tmp_df = df[['渔船ID', '方向', '速度', 'new_id']].copy()
tmp_df.columns = ['渔船ID', '方向_1', '速度_1', 'new_id']
tmp_df['new_id'] = tmp_df['new_id'] + 1
df = df.merge(tmp_df, on=['渔船ID', 'new_id'], how='left')
df['方向_delt'] = np.abs(df['方向_1'] - df['方向'])
df['速度_delt'] = np.abs(df['速度_1'] - df['速度'])
df.dropna(inplace=True)
tmp_df = df.groupby('渔船ID')['方向_delt'].agg({
'max', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', '方向_delt_mmax', '方向_delt_median', '方向_delt_mean']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df[df['速度'] > 0]
tmp_df = df.groupby('渔船ID')['方向_delt'].agg({
'min', 'std', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', '方向_delt_min2', '方向_delt_std2',
'方向_delt_median2', '方向_delt_mean2']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df[df['速度'] > 0]
tmp_df = tmp_df.groupby('渔船ID')['方向_delt'].agg({
'min', 'std', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', '方向_delt_min3', '方向_delt_std3',
'方向_delt_median3', '方向_delt_mean3']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df.groupby('渔船ID')['速度_delt'].agg({
'max', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', '速度_delt_max', '速度_delt_median', '速度_delt_mean']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df[df['速度'] > 0]
tmp_df = df.groupby('渔船ID')['速度_delt'].agg({
'min', 'std', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', '速度_delt_min2', '速度_delt_std2',
'速度_delt_median2', '速度_delt_mean2']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df[df['速度'] > 0]
tmp_df = tmp_df.groupby('渔船ID')['速度_delt'].agg({
'min', 'std', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', '速度_delt_min3', '速度_delt_std3',
'速度_delt_median3', '速度_delt_mean3']
train = train.merge(tmp_df, on='渔船ID', how='left')
return train
df_train = get_data(trn_path)
train_ = df_train[['渔船ID', 'type']].drop_duplicates()
df_train = transform_day(df_train)
train_ = get_feature(df_train, train_)
train_ = get_feature2(df_train, train_)
train_.drop(['type', 'slope_mean1', 'slope_mean2'], axis=1, inplace=True)
df_test = get_data(test_path)
test = df_test[['渔船ID']].drop_duplicates()
df_test = transform_day(df_test)
test = get_feature(df_test, test)
test = get_feature2(df_test, test)
test.drop(['slope_mean1', 'slope_mean2'], axis=1, inplace=True)
print('begin tfidf')
data = pd.concat((df_train, df_test))
data['destination'] = data['lat'].map(str) + '_' + data['lon'].map(str)
enc_vec = TfidfVectorizer()
group_df = data.groupby(['渔船ID'])['destination'].agg({
lambda x: list(x)
}).reset_index()
group_df.columns = ['渔船ID', 'destination']
group_df['destination'] = group_df['destination'].apply(lambda x: ' '.join(x))
tfidf_vec = enc_vec.fit_transform(group_df['destination'])
svd_enc = TruncatedSVD(n_components=30, n_iter=20, random_state=1996)
vec_svd = svd_enc.fit_transform(tfidf_vec)
vec_svd = pd.DataFrame(vec_svd)
vec_svd.columns = ['svd_{}_{}'.format('destination', i) for i in range(30)]
group_df = pd.concat([group_df, vec_svd], axis=1)
"""
Routine for Classifier and NER training. Provide a version and a model will be trained on a dataset
of the same version.
This script expects data/<version> to be a directory where models, metrics and dataset are present.
Usage:
train.py <version>
train.py (classification|ner) <version>
train.py (-h | --help)
train.py --version
Options:
<version> The version of the dataset to use, the model produced will also be in the same dir.
-h --help Show this screen.
--version Show version.
"""
import argparse
import json
import os
import functools
from datetime import datetime
import pandas as pd
import semver
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from slu import constants as const
from slu.dev.version import check_version_save_config
from slu.src.controller.prediction import get_workflow
from slu.utils import logger
from slu.utils.config import Config, YAMLLocalConfig
def make_label_column_uniform(data_frame: pd.DataFrame) -> None:
if const.INTENTS in data_frame.columns:
column = const.INTENTS
elif const.LABELS in data_frame.columns:
column = const.LABELS
elif const.TAG in data_frame.columns:
column = const.TAG
else:
raise ValueError(
f"Expected one of {const.LABELS}, {const.TAG} to be present in the dataset."
)
data_frame.rename(columns={column: const.TAG}, inplace=True)
def reftime_patterns(reftime: str):
time_fns = [
datetime.fromisoformat,
lambda date_string: datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S.%f %z %Z'),
lambda date_string: datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%SZ'),
lambda date_string: datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%S.%f%z')
]
for time_fn in time_fns:
try:
return time_fn(reftime)
except ValueError:
continue
raise ValueError(f"Could not parse reftime {reftime}")
def make_reftime_column_uniform(data_frame: pd.DataFrame) -> None:
if const.REFERENCE_TIME not in data_frame.columns:
return
for i, row in tqdm(data_frame.iterrows(), total=len(data_frame), desc="Fixing reference time"):
if row[const.REFERENCE_TIME] is not None and not pd.isna(row[const.REFERENCE_TIME]):
data_frame.loc[i, const.REFERENCE_TIME] = reftime_patterns(row[const.REFERENCE_TIME]).isoformat()
def make_data_column_uniform(data_frame: pd.DataFrame) -> None:
if const.ALTERNATIVES in data_frame.columns:
column = const.ALTERNATIVES
elif const.DATA in data_frame.columns:
column = const.DATA
else:
raise ValueError(
f"Expected one of {const.ALTERNATIVES}, {const.DATA} to be present in the dataset."
)
data_frame.rename(columns={column: const.ALTERNATIVES}, inplace=True)
for i, row in tqdm(
data_frame.iterrows(), total=len(data_frame), desc="Fixing data structure"
):
if isinstance(row[const.ALTERNATIVES], str):
data = json.loads(row[const.ALTERNATIVES])
if const.ALTERNATIVES in data:
data_frame.loc[i, const.ALTERNATIVES] = json.dumps(
data[const.ALTERNATIVES]
)
def create_data_splits(args: argparse.Namespace) -> None:
"""
Create a data split for the given version.
:param args: The arguments passed to the script.
"""
version = args.version
project_config_map = YAMLLocalConfig().generate()
config: Config = list(project_config_map.values()).pop()
check_version_save_config(config, version)
dataset_file = args.file
train_size = args.train_size
test_size = args.test_size
stratify = args.stratify
dest = args.dest or config.get_dataset_dir(const.CLASSIFICATION)
if os.listdir(dest):
ver_ = semver.VersionInfo.parse(config.version)
ver_.bump_patch()
raise RuntimeError(
f"""
Data already exists in {dest} You should create a new version using:
```shell
slu dir-setup --version {str(ver_.bump_patch())}
```
""".strip()
)
if not os.path.isdir(dest):
raise ValueError(
f"Destination directory {dest} does not exist or is not a directory."
)
data_frame = pd.read_csv(dataset_file)
logger.debug(f"Data frame: {data_frame.shape}")
skip_list = config.get_skip_list(const.CLASSIFICATION)
make_label_column_uniform(data_frame)
make_data_column_uniform(data_frame)
make_reftime_column_uniform(data_frame)
skip_filter = data_frame[const.TAG].isin(skip_list)
failed_transcripts = data_frame[const.ALTERNATIVES].isin(["[[]]", "[]"])
non_empty_transcripts = data_frame[const.ALTERNATIVES].isna()
invalid_samples = skip_filter | non_empty_transcripts | failed_transcripts
train_skip_samples = data_frame[invalid_samples]
train_available_samples = data_frame[~invalid_samples]
logger.info(
f"Dataset has {len(train_skip_samples)} samples unfit for training."
f" Using this for tests and {len(train_available_samples)} for train-test split."
)
if stratify:
labels = data_frame[const.TAG][~invalid_samples]
else:
labels = None
train, test = train_test_split(
train_available_samples,
train_size=train_size,
test_size=test_size,
stratify=labels,
)
test = pd.concat([train_skip_samples, test], sort=False)
train.to_csv(os.path.join(dest, f"{const.TRAIN}.csv"), index=False)
test.to_csv(os.path.join(dest, f"{const.TEST}.csv"), index=False)
def merge_datasets(args: argparse.Namespace) -> None:
"""
Merge the datasets.
"""
data_files = args.files
file_name = args.out
data_frames = pd.concat([pd.read_csv(data_file) for data_file in data_files])
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
data = pd.read_csv(path)
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from sklearn.utils.estimator_checks import parametrize_with_checks
from skdownscale.pointwise_models import (
AnalogRegression,
BcsdPrecipitation,
BcsdTemperature,
CunnaneTransformer,
EquidistantCdfMatcher,
LinearTrendTransformer,
PaddedDOYGrouper,
PureAnalog,
PureRegression,
QuantileMapper,
QuantileMappingReressor,
ZScoreRegressor,
)
@pytest.fixture(scope='module')
def sample_X_y(n=365):
index = pd.date_range('2019-01-01', periods=n)
X = pd.DataFrame(
{'foo': np.sin(np.linspace(-10 * np.pi, 10 * np.pi, n)) * 10, 'bar': np.random.rand((n))},
index=index,
)
y = X['foo'] + 2
return X, y
@parametrize_with_checks(
[
# Regressors
AnalogRegression(),
BcsdPrecipitation(),
BcsdTemperature(),
PureAnalog(),
PureRegression(),
ZScoreRegressor(),
QuantileMappingReressor(n_endpoints=2),
EquidistantCdfMatcher(kind='difference', n_endpoints=2),
EquidistantCdfMatcher(kind='ratio', n_endpoints=2),
# transformers
LinearTrendTransformer(),
CunnaneTransformer(),
QuantileMapper(),
]
)
def test_sklearn_compatible_estimator(estimator, check):
check(estimator)
def test_linear_trend_roundtrip():
# TODO: there is probably a better analytic test here
n = 100
trend = 1
yint = 15
trendline = trend * np.arange(n) + yint
trendline = trendline.reshape(-1, 1)
noise = np.sin(np.linspace(-10 * np.pi, 10 * np.pi, n)) * 10
noise = noise.reshape(-1, 1)
data = trendline + noise
ltt = LinearTrendTransformer()
# remove trend
d_no_trend = ltt.fit_transform(data)
# assert detrended data is equal to noise
np.testing.assert_almost_equal(d_no_trend, noise, decimal=0)
# assert linear coef is equal to trend
np.testing.assert_almost_equal(ltt.lr_model_.coef_, trend, decimal=0)
# assert roundtrip
np.testing.assert_array_equal(ltt.inverse_transform(d_no_trend), data)
def test_quantile_mapper():
n = 100
expected = np.sin(np.linspace(-10 * np.pi, 10 * np.pi, n)) * 10
expected = expected.reshape(-1, 1)
with_bias = expected + 2
mapper = QuantileMapper()
mapper.fit(expected)
actual = mapper.transform(with_bias)
np.testing.assert_almost_equal(actual, expected)
@pytest.mark.xfail(reason='Need 3 part QM routine to handle bias removal')
def test_quantile_mapper_detrend():
n = 100
trend = 1
yint = 15
trendline = trend * np.arange(n) + yint
base = np.sin(np.linspace(-10 * np.pi, 10 * np.pi, n)) * 10
expected = base + trendline
with_bias = expected + 2
mapper = QuantileMapper(detrend=True)
mapper.fit(base)
actual = mapper.transform(with_bias)
np.testing.assert_almost_equal(actual.squeeze(), expected)
@pytest.mark.parametrize(
'model',
[
BcsdTemperature(),
PureAnalog(),
AnalogRegression(),
PureRegression(),
ZScoreRegressor(),
QuantileMappingReressor(),
QuantileMappingReressor(extrapolate='min'),
QuantileMappingReressor(extrapolate='max'),
QuantileMappingReressor(extrapolate='both'),
QuantileMappingReressor(extrapolate='1to1'),
EquidistantCdfMatcher(),
EquidistantCdfMatcher(extrapolate='min'),
EquidistantCdfMatcher(extrapolate='max'),
EquidistantCdfMatcher(extrapolate='both'),
EquidistantCdfMatcher(extrapolate='1to1'),
],
)
def test_linear_model(model):
n = 365
# TODO: add test for time other time ranges (e.g. < 365 days)
index = pd.date_range('2019-01-01', periods=n)
X = pd.DataFrame({'foo': np.sin(np.linspace(-10 * np.pi, 10 * np.pi, n)) * 10}, index=index)
y = X + 2
model.fit(X, y)
y_hat = model.predict(X)
assert len(y_hat) == len(X)
@pytest.mark.parametrize(
'model_cls',
[PureAnalog, AnalogRegression, PureRegression],
)
def test_models_with_multiple_features(sample_X_y, model_cls):
X, y = sample_X_y
model = model_cls()
model.fit(X, y)
y_hat = model.predict(X)
assert len(y_hat) == len(X)
@pytest.mark.parametrize(
'kind',
['best_analog', 'sample_analogs', 'weight_analogs', 'mean_analogs'],
)
def test_gard_analog_models(sample_X_y, kind):
X, y = sample_X_y
# test non threshold modeling
model = PureAnalog(kind=kind, n_analogs=3)
model.fit(X, y)
out = model.predict(X)
y_hat = out['pred']
error = out['prediction_error']
prob = out['exceedance_prob']
assert len(prob) == len(error) == len(y_hat) == len(X)
assert (prob == 1).all()
# test threshold modeling
model = PureAnalog(kind=kind, n_analogs=3, thresh=0)
model.fit(X, y)
out = model.predict(X)
y_hat = out['pred']
error = out['prediction_error']
prob = out['exceedance_prob']
assert len(prob) == len(error) == len(y_hat) == len(X)
assert (prob <= 1).all()
assert (prob >= 0).all()
@pytest.mark.parametrize('thresh', [None, 3])
def test_gard_analog_regression_models(sample_X_y, thresh):
X, y = sample_X_y
model = AnalogRegression(thresh=thresh)
model.fit(X, y)
out = model.predict(X)
y_hat = out['pred']
error = out['prediction_error']
prob = out['exceedance_prob']
assert len(prob) == len(error) == len(y_hat) == len(X)
if model.thresh:
assert (prob <= 1).all()
assert (prob >= 0).all()
else:
assert (prob == 1).all()
@pytest.mark.parametrize('thresh', [None, 3])
def test_gard_pure_regression_models(sample_X_y, thresh):
X, y = sample_X_y
model = PureRegression(thresh=thresh)
model.fit(X, y)
out = model.predict(X)
y_hat = out['pred']
error = out['prediction_error']
prob = out['exceedance_prob']
assert len(prob) == len(error) == len(y_hat) == len(X)
if model.thresh:
assert (prob <= 1).all()
assert (prob >= 0).all()
else:
assert (prob == 1).all()
@pytest.mark.parametrize('model_cls', [BcsdPrecipitation])
def test_linear_model_prec(model_cls):
n = 365
# TODO: add test for time other time ranges (e.g. < 365 days)
index = pd.date_range('2019-01-01', periods=n)
X = pd.DataFrame({'foo': np.random.random(n)}, index=index)
y = X + 2
model = model_cls()
model.fit(X, y)
y_hat = model.predict(X)
assert len(y_hat) == len(X)
def test_zscore_scale():
time = pd.date_range(start='2018-01-01', end='2020-01-01')
data_X = np.linspace(0, 1, len(time))
data_y = data_X * 2
X = xr.DataArray(data_X, name='foo', dims=['index'], coords={'index': time}).to_dataframe()
y = xr.DataArray(data_y, name='foo', dims=['index'], coords={'index': time}).to_dataframe()
data_scale_expected = [2 for i in np.zeros(364)]
scale_expected = xr.DataArray(
data_scale_expected, name='foo', dims=['day'], coords={'day': np.arange(1, 365)}
).to_series()
zscore = ZScoreRegressor()
zscore.fit(X, y)
np.testing.assert_allclose(zscore.scale_, scale_expected)
def test_zscore_shift():
time = pd.date_range(start='2018-01-01', end='2020-01-01')
data_X = np.zeros(len(time))
data_y = np.ones(len(time))
X = xr.DataArray(data_X, name='foo', dims=['index'], coords={'index': time}).to_dataframe()
y = xr.DataArray(data_y, name='foo', dims=['index'], coords={'index': time}).to_dataframe()
shift_expected = xr.DataArray(
np.ones(364), name='foo', dims=['day'], coords={'day': np.arange(1, 365)}
).to_series()
zscore = ZScoreRegressor()
zscore.fit(X, y)
np.testing.assert_allclose(zscore.shift_, shift_expected)
def test_zscore_predict():
time = pd.date_range(start='2018-01-01', end='2020-01-01')
data_X = np.linspace(0, 1, len(time))
X = xr.DataArray(data_X, name='foo', dims=['index'], coords={'index': time}).to_dataframe()
shift = xr.DataArray(
np.zeros(364), name='foo', dims=['day'], coords={'day': np.arange(1, 365)}
).to_series()
scale = xr.DataArray(
np.ones(364), name='foo', dims=['day'], coords={'day': np.arange(1, 365)}
).to_series()
zscore = ZScoreRegressor()
zscore.shift_ = shift
zscore.scale_ = scale
i = int(zscore.window_width / 2)
expected = xr.DataArray(
data_X, name='foo', dims=['index'], coords={'index': time}
).to_dataframe()
expected[0:i] = 'NaN'
expected[-i:] = 'NaN'
out = zscore.predict(X)
np.testing.assert_allclose(out.astype(float), expected.astype(float))
def test_paddeddoygrouper():
index = pd.date_range(start='1980-01-01', end='1982-12-31')
X = pd.DataFrame({'foo': np.random.random(len(index))}, index=index)
day_groups = PaddedDOYGrouper(X)
doy_group_list = dict(list(day_groups))
day_of_year = 123
days_included = np.arange(day_of_year - 15, day_of_year + 16)
np.testing.assert_array_equal(
np.unique(doy_group_list[day_of_year].index.dayofyear), days_included
)
def test_BcsdTemperature_nasanex():
index = pd.date_range(start='1980-01-01', end='1982-12-31')
import pandas as pd
import numpy as np
import datetime
import calendar
from math import e
from brightwind.analyse import plot as plt
# noinspection PyProtectedMember
from brightwind.analyse.analyse import dist_by_dir_sector, dist_12x24, coverage, _convert_df_to_series
from ipywidgets import FloatProgress
from IPython.display import display
from IPython.display import clear_output
import re
import warnings
pd.options.mode.chained_assignment = None
__all__ = ['Shear']
class Shear:
class TimeSeries:
def __init__(self, wspds, heights, min_speed=3, calc_method='power_law', max_plot_height=None,
maximise_data=False):
"""
Calculates alpha, using the power law, or the roughness coefficient, using the log law, for each timestamp
of a wind series.
:param wspds: pandas DataFrame, list of pandas.Series or list of wind speeds to be used for calculating shear.
:type wspds: pandas.DataFrame, list of pandas.Series or list.
:param heights: List of anemometer heights.
:type heights: list
:param min_speed: Only speeds higher than this would be considered for calculating shear, default is 3.
:type min_speed: float
:param calc_method: method to use for calculation, either 'power_law' (returns alpha) or 'log_law'
(returns the roughness coefficient).
:type calc_method: str
:param max_plot_height: height to which the wind profile plot is extended.
:type max_plot_height: float
:param maximise_data: If maximise_data is True, calculations will be carried out on all data where two or
more anemometers readings exist for a timestamp. If False, calculations will only be
carried out on timestamps where readings exist for all anemometers.
:type maximise_data: Boolean
:return TimeSeries object containing calculated alpha/roughness coefficient values, a plot
and other data.
:rtype TimeSeries object
**Example usage**
::
import brightwind as bw
import pprint
# Load anemometer data to calculate exponents
data = bw.load_csv(C:\\Users\\Stephen\\Documents\\Analysis\\demo_data)
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Using with a DataFrame of wind speeds
timeseries_power_law = bw.Shear.TimeSeries(anemometers, heights, maximise_data=True)
timeseries_log_law = bw.Shear.TimeSeries(anemometers, heights, calc_method='log_law',
max_plot_height=120)
# Get the alpha or roughness values calculated
timeseries_power_law.alpha
timeseries_log_law.roughness
# View plot
timeseries_power_law.plot
timeseries_log_law.plot
# View input anemometer data
timeseries_power_law.wspds
timeseries_log_law.wspds
# View other information
pprint.pprint(timeseries_power_law.info)
pprint.pprint(timeseries_log_law.info)
"""
print('This may take a while...')
wspds, cvg = Shear._data_prep(wspds=wspds, heights=heights, min_speed=min_speed, maximise_data=maximise_data)
if calc_method == 'power_law':
alpha_c = (wspds[(wspds > min_speed).all(axis=1)].apply(Shear._calc_power_law, heights=heights,
return_coeff=True,
maximise_data=maximise_data, axis=1))
alpha = pd.Series(alpha_c.iloc[:, 0], name='alpha')
self._alpha = alpha
elif calc_method == 'log_law':
slope_intercept = (wspds[(wspds > min_speed).all(axis=1)].apply(Shear._calc_log_law, heights=heights,
return_coeff=True,
maximise_data=maximise_data, axis=1))
slope = slope_intercept.iloc[:, 0]
intercept = slope_intercept.iloc[:, 1]
roughness_coefficient = pd.Series(Shear._calc_roughness(slope=slope, intercept=intercept),
name='roughness_coefficient')
self._roughness = roughness_coefficient
clear_output()
avg_plot = Shear.Average(wspds=wspds, heights=heights, calc_method=calc_method,
max_plot_height=max_plot_height)
self.origin = 'TimeSeries'
self.calc_method = calc_method
self.wspds = wspds
self.plot = avg_plot.plot
self.info = Shear._create_info(self, heights=heights, cvg=cvg, min_speed=min_speed)
@property
def alpha(self):
return self._alpha
@property
def roughness(self):
return self._roughness
def apply(self, wspds, height, shear_to):
""""
Applies shear calculated to a wind speed time series and scales wind speed from one height to
another for each matching timestamp.
:param self: TimeSeries object to use when applying shear to the data.
:type self: TimeSeries object
:param wspds: Wind speed time series to apply shear to.
:type wspds: pandas.Series
:param height: height of above wspds.
:type height: float
:param shear_to: height to which wspds should be scaled to.
:type shear_to: float
:return: a pandas.Series of the scaled wind speeds.
:rtype: pandas.Series
**Example Usage**
::
import brightwind as bw
# Load anemometer data to calculate exponents
data = bw.load_csv(C:\\Users\\Stephen\\Documents\\Analysis\\demo_data)
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Get power law object
timeseries_power_law = bw.Shear.TimeSeries(anemometers, heights)
timeseries_log_law = bw.Shear.TimeSeries(anemometers, heights, calc_method='log_law')
# Scale wind speeds using calculated exponents
timeseries_power_law.apply(data['Spd40mN'], height=40, shear_to=70)
timeseries_log_law.apply(data['Spd40mN'], height=40, shear_to=70)
"""
return Shear._apply(self, wspds, height, shear_to)
class TimeOfDay:
def __init__(self, wspds, heights, min_speed=3, calc_method='power_law', by_month=True, segment_start_time=7,
segments_per_day=24, plot_type='line'):
"""
Calculates alpha, using the power law, or the roughness coefficient, using the log law, for a wind series
binned by time of the day and (optionally by) month, depending on the user's inputs. The alpha/roughness
coefficient values are calculated based on the average wind speeds at each measurement height in each bin.
:param wspds: pandas.DataFrame, list of pandas.Series or list of wind speeds to be used for calculating
shear.
:type wspds: pandas.DataFrame, list of pandas.Series or list.
:param heights: List of anemometer heights.
:type heights: list
:param min_speed: Only speeds higher than this would be considered for calculating shear, default is 3
:type min_speed: float
:param calc_method: method to use for calculation, either 'power_law' (returns alpha) or 'log_law'
(returns the roughness coefficient).
:type calc_method: str
:param by_month: If True, calculate alpha or roughness coefficient values for each daily segment and month.
If False, average alpha or roughness coefficient values are calculated for each daily
segment across all months.
:type by_month: Boolean
:param segment_start_time: Starting time for first segment.
:type segment_start_time: int
:param segments_per_day: Number of segments into which each 24 period is split. Must be a divisor of 24.
:type segments_per_day: int
:param plot_type: Type of plot to be generated. Options include 'line', 'step' and '12x24'.
:type plot_type: str
:return: TimeOfDay object containing calculated alpha/roughness coefficient values, a plot
and other data.
:rtype: TimeOfDay object
**Example usage**
::
import brightwind as bw
import pprint
# Load anemometer data to calculate exponents
data = bw.load_csv(C:\\Users\\Stephen\\Documents\\Analysis\\demo_data)
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Using with a DataFrame of wind speeds
timeofday_power_law = bw.Shear.TimeOfDay(anemometers, heights, segments_per_day=2, segment_start_time=7)
timeofday_log_law = bw.Shear.TimeOfDay(anemometers, heights, calc_method='log_law', by_month=False)
# Get alpha or roughness values calculated
timeofday_power_law.alpha
timeofday_log_law.roughness
# View plot
timeofday_power_law.plot
timeofday_log_law.plot
# View input data
timeofday_power_law.wspds
timeofday_log_law.wspds
# View other information
pprint.pprint(timeofday_power_law.info)
pprint.pprint(timeofday_log_law.info)
"""
wspds, cvg = Shear._data_prep(wspds=wspds, heights=heights, min_speed=min_speed)
# initialise empty series for later use
start_times = pd.Series([])
time_wspds = pd.Series([])
mean_time_wspds = pd.Series([])
c = pd.Series([])
slope = pd.Series([])
intercept = pd.Series([])
alpha = pd.Series([])
roughness = pd.Series([])
slope_df = pd.DataFrame([])
intercept_df = pd.DataFrame([])
roughness_df = pd.DataFrame([])
alpha_df = pd.DataFrame([])
# time of day shear calculations
interval = int(24 / segments_per_day)
if by_month is False and plot_type == '12x24':
raise ValueError("12x24 plot is only possible when 'by_month=True'")
if not segment_start_time % 1 == 0:
    raise ValueError("'segment_start_time' must be an integer between 0 and 24")
if not (24 % segments_per_day == 0) | (segments_per_day == 1):
    raise ValueError("'segments_per_day' must be a divisor of 24")
segment_start_time = str(segment_start_time)
start_times[0] = datetime.datetime.strptime(segment_start_time, '%H')
dt = datetime.timedelta(hours=interval)
# extract wind speeds for each daily segment
for i in range(1, segments_per_day):
start_times[i] = start_times[i - 1] + dt
# extract wind speeds for each month
months_tot = pd.unique(wspds.index.month.values)
for j in months_tot:
anemometers_df = wspds[wspds.index.month == j]
for i in range(0, segments_per_day):
if segments_per_day == 1:
mean_time_wspds[i] = anemometers_df.mean().dropna()
elif i == segments_per_day - 1:
start_times[i] = start_times[i].strftime("%H:%M:%S")
start = str(start_times[i].time())
end = str(start_times[0].time())
time_wspds[i] = pd.DataFrame(anemometers_df).between_time(start, end, include_end=False)
mean_time_wspds[i] = time_wspds[i][(time_wspds[i] > min_speed).all(axis=1)].mean().dropna()
else:
start_times[i] = start_times[i].strftime("%H:%M:%S")
start = str(start_times[i].time())
end = str(start_times[i + 1].time())
time_wspds[i] = pd.DataFrame(anemometers_df).between_time(start, end, include_end=False)
mean_time_wspds[i] = time_wspds[i][(time_wspds[i] > min_speed).all(axis=1)].mean().dropna()
# calculate shear
if calc_method == 'power_law':
for i in range(0, len(mean_time_wspds)):
alpha[i], c[i] = Shear._calc_power_law(mean_time_wspds[i].values, heights, return_coeff=True)
alpha_df = pd.concat([alpha_df, alpha], axis=1)
if calc_method == 'log_law':
for i in range(0, len(mean_time_wspds)):
slope[i], intercept[i] = Shear._calc_log_law(mean_time_wspds[i].values, heights,
return_coeff=True)
roughness[i] = Shear._calc_roughness(slope=slope[i], intercept=intercept[i])
roughness_df = pd.concat([roughness_df, roughness], axis=1)
slope_df = pd.concat([slope_df, slope], axis=1)
import os
import math
import copy
import random
import calendar
import csv
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
import sqlite3
import seaborn as sns
#from atnresilience import atn_analysis as atn
import atn_analysis
import db_tools
# Set global styles for plots
plt.rcParams["font.family"] = "Times New Roman"
sns.set_palette("colorblind")
matplotlib.rc('xtick', labelsize=8)
matplotlib.rc('ytick', labelsize=8)
line_type = {1:'-',2:'--',3:':',4:'-.'}
def remove_frequency(db_path, file, airline, include_data, can_limit, zs_limit, processed_direc):
"""
Creates a dictionary of airports and their removal frequency for a given airline
Parameters
----------
file: int
Year of selected data
airline: string
Airline to get data from
include_data: string
Type of airline data to query from csv
can_limit: int
Cancellation limit
zs_limit: int
The z-score limit
Returns
-------
Returns a dictionary containing airport removal frequency values
Notes
-----
"""
df_net_tuple = pd.DataFrame()
df_net = atn_analysis.raw_query(db_path, file, airline)
df_net_tuple["Origin"] = df_net.Origin_Airport_Code
df_net_tuple["Destination"] = df_net.Destination_Airport_Code
graph = [tuple(x) for x in df_net_tuple.to_records(index=False)]
G = nx.Graph()
G.add_edges_from(graph)
tempG = G.copy()
Airport_Dict = {}
for i in G.nodes():
Airport_Dict[i] = 0
Total_List = get_remove_list(db_path, file,include_data, airline, can_limit, zs_limit, processed_direc)
if int(file)%4 == 0:
total_day = 366
else:
total_day = 365
for j in range(total_day):
airport_list = Total_List[j]
for l in airport_list:
tempG.remove_node(l)
Airport_Dict[l] = Airport_Dict[l] + 1
tempG = G.copy()
return(Airport_Dict)
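# Shape of the result (illustrative only; the airport codes and counts are made up):
# remove_frequency(...) returns something like {'ATL': 14, 'ORD': 9, 'DEN': 0, ...},
# i.e. for every airport in the airline's network, the number of days in the year on
# which the data-driven criterion flagged it for removal.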
def weighted_edge(db_path, file, airline):
"""
Creates a data frame of origin airports, destination airports and weights for each route
Parameters
----------
db_path: string
Path to the processed sqlite flight database
file: int
Year of selected data
airline: string
Airline to get data from
Returns
-------
Returns a data frame containing each respective weighted route from an origin airport to a destination
Notes
-----
"""
df = atn_analysis.raw_query(db_path, file, airline)
by_origin = df.groupby([df.Origin_Airport_Code]).Can_Status.count()
airport_list = by_origin.index.tolist()
df = df[df['Destination_Airport_Code'].isin(airport_list)]
df_tuple = pd.DataFrame()
df_weighted = df.groupby([df.Origin_Airport_Code, df.Destination_Airport_Code]).Can_Status.count().reset_index()
df_tuple["Origin"] = df_weighted.Origin_Airport_Code
df_tuple["Destination"] = df_weighted.Destination_Airport_Code
file_str = int(str(file)[:4])
if calendar.isleap(file_str) == 1:
days = 366
else:
days = 365
df_tuple["Weight"] = df_weighted.Can_Status
weight_values = [math.log(y, 10) for y in df_tuple.Weight.values]
for i in range(0, len(weight_values)):
df_tuple.Weight.values[i] = weight_values[i]
return(df_tuple)
def get_remove_list(db_path, file, include_data, airline, can_limit, zs_limit, processed_direc):
"""
Return a remove_list in a year (airline specific, include_data specific) based on cancelation limit and z_score limit.
Parameters
----------
file: int
Year of selected data
include_data: string
Specify what kind of data to include in processed flight data. See drop_flights in M-D File. Possible parameters are:
CC: Cancellations only
ADD: Arrival delays including diversions
ADM: Purely arrival delays excluding cancellations or diversions
DCC: Combined delay. If arrival delay is greater than a set threshold, the flight is considered cancelled
DD: Departure delays. Does not include cancelled or diverted flights.
airline: string
Airline to get data from. This is the 2 letter airline code (ex: AA, UA, DL, WN)
can_limit: float
Cancellation Limit. Between 0 and 1
zs_limit: float
z-score limit. Between 0 and 1
Returns
-------
list of lists: for each day of the year, the list of airports flagged for removal on that day
Notes
-----
"""
z_score_path = '%s%s_%s_Zdata_%s.csv'%(processed_direc, file,airline,include_data)
#df_score = pd.read_csv(raw_file_drop, index_col="Date")
df_score = pd.read_csv(z_score_path, index_col = "Day_of_Year")
df_score.index = pd.to_datetime(df_score.index)
airport_list = df_score.columns.tolist()
df = atn_analysis.raw_query(db_path,file,airline)
df = df[df['Origin_Airport_Code'].isin(airport_list)] # Filtering to make sure airports are equal in both directions
df = df[df['Destination_Airport_Code'].isin(airport_list)]
by_origin_count = df.groupby(['Flight_Date', 'Origin_Airport_Code'], as_index=False)[['Can_Status']].count()
by_origin = df.groupby(['Flight_Date', 'Origin_Airport_Code'], as_index=False)[['Can_Status']].sum()
by_origin.Can_Status = by_origin.Can_Status / by_origin_count.Can_Status
#print(by_origin)
df_score["idx"] = df_score.index
df_score = pd.melt(df_score, id_vars='idx', value_vars=airport_list)
df_score = df_score.sort_values(['idx', 'variable'], ascending=[True, True])
df_score.columns = ["Date", "Airports", "Z_Score"]
df_score.set_index('Date')
df_score["Cancellations"] = by_origin.Can_Status
### Creating the or conditions. First is the percentage of delayed flights and the second is the z-score
df_score["Z_score_9901"] = np.where((df_score['Cancellations'] > can_limit) | (df_score['Z_Score'] > zs_limit), 1, 0)
#print(df_score)
### Creating pivot table for easy manipulation. This creates the date as the index with the properties corresponding to
### it and finally repeats this trend for all airports being considered.
df_pivot = df_score.pivot_table('Z_score_9901', ['Date'], 'Airports')
#print(df_pivot)
s = np.asarray(np.where(df_pivot == 1, ['{}'.format(x) for x in df_pivot.columns], '')).tolist()
s_nested = []
for k in s:
p = list(filter(None,k))
#p = filter(None,k)
s_nested.append(p)
#s_nested.extend(p)
return s_nested
def inv_average_shortest_path_length(graph, weight=None):
"""
Creates an unweight inverse average path length graph
Parameters
----------
graph: python graph object
weight: default
Returns
-------
Returns the IAPL unweighted graph
Notes
-----
"""
avg = 0.0
if weight is None:
for node in graph:
avg_path_length = nx.single_source_shortest_path_length(graph, node) # get the shortest path lengths from source to all reachable nodes (unweighted)
del avg_path_length[node] # Deletes source node from the list to avoid division by 0
inv_avg_path_length = copy.deepcopy(avg_path_length)
inv_avg_path_length.update((x, 1/y) for x, y in avg_path_length.items())
avg += sum(inv_avg_path_length.values())
n = len(graph)
if n == 1 or n == 0:
return 0
else:
return avg/(n*(n-1))
def inv_average_shortest_path_length_W(graph, weight=None):
"""
Computes the weighted inverse average shortest path length (IAPL) of a graph using Dijkstra shortest-path lengths.
Parameters
----------
graph: python graph object
weight: default
Returns
-------
float: the weighted IAPL (0 for graphs with fewer than two nodes)
Notes
-----
"""
avg = 0.0
if weight is None:
for node in graph:
avg_path_length = nx.single_source_dijkstra_path_length(graph, node) # get the shortest path lengths from source to all reachable nodes (weighted)
del avg_path_length[node] # Deletes source node from the list to avoid division by 0
inv_avg_path_length = copy.deepcopy(avg_path_length)
inv_avg_path_length.update((x, 1/y) for x, y in avg_path_length.items())
avg += sum(inv_avg_path_length.values())
n = len(graph)
if n == 1 or n == 0:
return 0
else:
return avg/(n*(n-1))
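# Minimal sketch (not part of the original module): IAPL on a three-node path graph a-b-c.
# Unweighted shortest paths give d(a,b) = d(b,c) = 1 and d(a,c) = 2, so the sum of inverse
# lengths over ordered pairs is 2 * (1 + 1 + 0.5) = 5 and IAPL = 5 / (3 * 2) ~ 0.833.
def _iapl_demo():
    demo_g = nx.Graph()
    demo_g.add_edges_from([('a', 'b'), ('b', 'c')])
    return inv_average_shortest_path_length(demo_g)  # expected ~0.8333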
def Data_Driven_W(file_list, airline_list, include_data, can_limit, zs_limit, processed_direc, graph_direc):
"""
Calculate the cluster size and IAPL for each day in a year after removal based on data-driven method.
Parameters
----------
file_list: list
List contaning years to process
airline_list: list
List contaning airlines to process
include_data: string
Specify what kind of data to include in processed flight data. See drop_flights in M-D File. Possible parameters are:
CC: Cancellations only
ADD: Arrival delays including diversions
ADM: Purely arrival delays excluding cancellations or diversions
DCC: Combined delay. If arrival delay is greater than a set threshold, the flight is considered cancelled
DD: Departure delays. Does not include cancelled or diverted flights.
can_limit: float
Cancellation threshold
zs_limit: float
z-score threshold
Returns
-------
The cluster size and IAPL for each day of the year after removal based on data-driven method.
Notes
-----
"""
for file in file_list:
## iteration of years first
figure_num = 1
CSV_df = pd.DataFrame(columns = airline_list)
for airline in airline_list:
# CSV_df[airline] = [1,2,3,4]
# CSV_file = "%s_DD_IAPL.csv" %(file)
# CSV_df.to_csv(CSV_file, index=False)
## Get the directory path
script_dir = os.path.dirname(os.getcwd())
db_local_path = "data/processed/atn_db.sqlite"
## df set up from Keshav (NO CHANGE) (Weighted Graph)
df = pd.DataFrame()
db_path = os.path.join(script_dir, db_local_path)
fields = ["Origin_Airport_Code", "Destination_Airport_Code", "Can_Status"]
df_net = atn_analysis.raw_query(db_path,file,airline)
df["Origin_Airport_Code"] = df_net.Origin_Airport_Code
df["Destination_Airport_Code"] = df_net.Destination_Airport_Code
df["Can_Status"] = df_net.Can_Status
by_origin = df.groupby([df.Origin_Airport_Code]).Can_Status.count()
airport_list = by_origin.index.tolist()
df = df[df['Destination_Airport_Code'].isin(airport_list)]
#print(df)
df_tuple = pd.DataFrame()
df_weighted = df.groupby([df.Origin_Airport_Code, df.Destination_Airport_Code]).Can_Status.count().reset_index()
df_tuple["Origin"] = df_weighted.Origin_Airport_Code
df_tuple["Destination"] = df_weighted.Destination_Airport_Code
if int(file)%4 == 0:
days = 366
else:
days = 365
df_tuple["Weight"] = df_weighted.Can_Status/days
df_tuple.Weight = 1/df_tuple.Weight
## Output lists initialization:
#day_IAPL = 0
day_CS = 0
#output_IAPL = []
output_CS = []
NoD = []
## Graph object initialization
graph = [tuple(x) for x in df_tuple.to_records(index=False)]
G = nx.Graph()
## Set up the weighted graph
G.add_weighted_edges_from(graph)
#print(G.nodes())
tempG = G.copy() #use temporary graph for the loop
## Remove list for the whole year
Total_Remove_List = get_remove_list(db_path,file,include_data, airline, can_limit, zs_limit,processed_direc)
if int(file)%4 == 0:
total_day = 366
else:
total_day = 365
for j in range(total_day):
## Remove the nodes in each day and get the CS and IAPL data
#day_IAPL = 0
Day_Remove_List = Total_Remove_List[j]
NoD.append(j)
for l in Day_Remove_List:
tempG.remove_node(l)
#largest_component_b = max(nx.connected_components(tempG), key=len)
#day_IAPL =(inv_average_shortest_path_length_W(tempG))
largest_component_b = max(nx.connected_components(tempG), key=len)
day_CS = len(largest_component_b)
#len(largest_component_b) = cluster size
#cluster fraction = cluster size/number of nodes
#output_IAPL.append(day_IAPL)
output_CS.append(day_CS)
#sum_IAPL = sum_IAPL + (inv_average_shortest_path_length(tempG))
tempG = G.copy()
## plotting command
plt.figure(figure_num)
#line = plt.plot(NoD,output_IAPL, label="{}".format(airline))
line = plt.plot(NoD,output_CS, label="{}".format(airline))
plt.legend()
#CSV_df[airline] = output_IAPL
CSV_df[airline] = output_CS
#CSV_file = "%s_DD_IAPL.csv" %(file)
CSV_file = "%s%s_DD_CS.csv" %(graph_direc,file)
CSV_df.to_csv(CSV_file, index=False)
#plt.title("{} Data Driven IAPL".format(str(file)))
plt.xlabel("Day")
#plt.ylabel("IAPL")
plt.ylabel("Cluster Size")
#plt.savefig("{}_Data_Driven_IAPL.png".format(str(file)))
plt.savefig("%s%s_Data_Driven_CS.png"%(graph_direc,file))
plt.show()
figure_num = figure_num + 1
def Pure_Graph_W_Shu(file_list, airline_list, include_data, processed_direc, rep_num):
"""
Calculate the algebraic connectivity, cluster size and IAPL for each day in a year after random removal based on the Pure Graph method.
Random Removal set up by shuffle function
Parameters
----------
file_list: list
List contaning years to process
airline_list: list
List contaning airlines to process
include_data: string
Specify what kind of data to include in processed flight data. See drop_flights in M-D File. Possible parameters are:
CC: Cancellations only
ADD: Arrival delays including diversions
ADM: Purely arrival delays excluding cancellations or diversions
DCC: Combined delay. If arrival delay is greater than a set threshold, the flight is considered cancelled
DD: Departure delays. Does not include cancelled or diverted flights.
rep_num: int
Number of repetitions
Returns
-------
csv files with the algebraic connectivity, cluster size and IAPL after random removal based on the pure-graph method.
Notes
-----
"""
for airline in airline_list:
rep_ite = 1
Total_AC = []
Total_Cluster_Size = []
Total_IAPL = []
for i in range(len(file_list)):
## initialize the output lists
Total_AC.append(0)
Total_Cluster_Size.append(0)
Total_IAPL.append(0)
## Save the data in csv
filename1 = "%s%s_ACR.csv" %(processed_direc,airline)
with open(filename1, 'w') as myfile1:
wr1 = csv.writer(myfile1, quoting=csv.QUOTE_ALL)
wr1.writerow(file_list)
filename2 = "%s%s_IAPLR.csv" %(processed_direc,airline)
with open(filename2, 'w') as myfile2:
wr2 = csv.writer(myfile2, quoting=csv.QUOTE_ALL)
wr2.writerow(file_list)
filename3 = "%s%s_CSR.csv" %(processed_direc,airline)
with open(filename3, 'w') as myfile3:
wr3 = csv.writer(myfile3, quoting=csv.QUOTE_ALL)
wr3.writerow(file_list)
while rep_ite < rep_num+1:
## start the reptition
year_IAPL = []
year_Cluster_Size = []
year_AC = []
for file in file_list:
## Get the directory path
script_dir = os.path.dirname(os.getcwd())
db_local_path = "data/processed/atn_db.sqlite"
## df set up from Keshav (NO CHANGE)
df = pd.DataFrame()
db_path = os.path.join(script_dir, db_local_path)
fields = ["Origin_Airport_Code", "Destination_Airport_Code", "Can_Status"]
#df_net = pd.read_csv(comb_file, usecols=fields)
df_net = atn_analysis.raw_query(db_path,file,airline)
df["Origin_Airport_Code"] = df_net.Origin_Airport_Code
df["Destination_Airport_Code"] = df_net.Destination_Airport_Code
df["Can_Status"] = df_net.Can_Status
by_origin = df.groupby([df.Origin_Airport_Code]).Can_Status.count()
airport_list = by_origin.index.tolist()
df = df[df['Destination_Airport_Code'].isin(airport_list)]
#print(df)
df_tuple = pd.DataFrame()
import cobra
from cobra.core.metabolite import elements_and_molecular_weights
elements_and_molecular_weights['R']=0.0
elements_and_molecular_weights['Z']=0.0
import pandas as pd
import numpy as np
import csv
#### Change Biomass composition
# define a function change a biomass reaction in the model
def update_biomass(model, rxn, stoich, metabolite):
r = model.reactions.get_by_id(rxn)
new_stoich = stoich
# you now have a dictionary of new stoichs for your model
for m,s in r.metabolites.items():
stoich = s*-1
temp_dict = {m:stoich}
r.add_metabolites(temp_dict)
r.add_metabolites(new_stoich)
# Then get the total to equal 1 mg biomass DW
total = 0
for m,s in r.metabolites.items():
gfw = model.metabolites.get_by_id(m.id).formula_weight
mass = gfw*s*-1
total = total+mass
correction = total/1000 # this will get it to 1000 ug total mass
# Then adjust the stoichiometry as appropriate
for m,s in r.metabolites.items(): # now change the stoich
to_add = s/correction-s
r.add_metabolites({m:to_add})
# Finally build the biomass_c metabolite
imbal = r.check_mass_balance()
if 'charge' in imbal.keys():
met_charge = imbal['charge']*-1
del imbal['charge']
met_mass = 0
formula_string = ''
for e,v in imbal.items():
if v > 1e-10 or v < -1e-10:
mass = elements_and_molecular_weights[e]
met_mass = met_mass+(mass*-1*v)
form_str = e+str(-1*v)
formula_string = formula_string + form_str
met = model.metabolites.get_by_id(metabolite)
met.formula = formula_string
met.charge = met_charge
r.add_metabolites({met:1})
# Add GAM constraint
if rxn == 'bof_c':
gam_met = model.metabolites.GAM_const_c
r.add_metabolites({gam_met:1})
model.repair()
print(model.reactions.get_by_id(rxn).reaction)
print('')
print(model.metabolites.get_by_id(met.id).formula)
print('')
print(model.metabolites.get_by_id(met.id).formula_weight)
print('')
print(model.reactions.get_by_id(rxn).check_mass_balance())
return model
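# Hedged usage sketch (assumption - not from the original workflow; the model file,
# metabolite ids and coefficients below are placeholders, not a real biomass composition):
# model = cobra.io.read_sbml_model('my_model.xml')
# new_stoich = {model.metabolites.get_by_id('ala__L_c'): -0.45,
#               model.metabolites.get_by_id('atp_c'): -30.0}
# model = update_biomass(model, 'bof_c', new_stoich, 'biomass_c')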
#############################################
#### Simulate growth with all constraints####
#############################################
def figure_2LL(model):
## Run all 27 parameter combos and capture:
#### - growth rate
#### - biomass
import math as m
cols = ['ID','GR','mgDW','Cells']
data_out = pd.DataFrame(columns=cols)
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import SGD, Adagrad, RMSprop, Adadelta, Adamax, Adam
from tensorflow.keras.models import model_from_json
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Agg')
import math
class Preprocessing:
def __init__(self):
self.trainScore = 0
self.testScore = 0
self.rmseTrain = 0
self.rmseTest = 0
"load data input"
def load_data(self, file):
data = | pd.read_excel(file) | pandas.read_excel |
""" Functions for analyzing openmc tally results
This script contains functions for analyzing the tallies
from the openmc statepoint file and manipulating the data
into what is required for the criticality
part (phase 1a) of the FHR benchmark.
"""
import numpy as np
import pylab as pl
import matplotlib.colorbar as cbar
import pandas as pd
import matplotlib.pyplot as plt
import sys
sys.path.insert(1, '../../scripts/')
from phase1a_constants import *
###############################################################################
# Criticality Functions
###############################################################################
def flux_conv(df, sp_power, k, kerr):
"""Converts flux unit from [n*cm/src] to [n/cm^2*s]
Parameters
----------
df: pandas dataframe with flux, nu-fission, and fission values
sp_power: float
specific power of reactor [W/gU]
k: keff [n/src]
kerr: keff uncertainty [n/src]
Returns
-------
flux: np.array(float)
array of flux in [n/cm2*s] for each energy group
flux_err: np.array(float)
array of flux uncertainties in [n/cm2*s] for each energy group
"""
P = 245486.6796001383 # W
Q = 200 * 1.6022e-13 # J/fission
nu_fission = np.array(
df[df['score'].str.match('nu-fission')]['mean']) # n/src
fission = np.array(df[df['score'].str.match('fission')]
['mean']) # fission/src
og_flux = np.array(df[df['score'].str.match('flux')]['mean']) # n*cm/src
nu_fission_err = np.array(
df[df['score'].str.match('nu-fission')]['std. dev.'])
fission_err = np.array(df[df['score'].str.match('fission')]['std. dev.'])
og_flux_err = np.array(df[df['score'].str.match('flux')]['std. dev.'])
nu = nu_fission / fission # n/fission
N = P * nu / (Q * k) # src/s
V = 3 * np.sqrt(3) / 2 * H_side ** 2 * z_thickness * T_pitch # cm3
flux = 1 / V * N * og_flux # n/(cm2*s)
flux[np.isnan(flux)] = 0
flux_err = (np.sqrt((nu_fission_err / nu_fission)**2 +
(fission_err / fission)**2 + (og_flux_err / og_flux)**2 +
(kerr / k)**2)) * flux
flux_err[np.isnan(flux_err)] = 0
return flux, flux_err
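# Hypothetical usage sketch for flux_conv: the statepoint file name, tally name
# and k-effective values below are assumptions, not benchmark results. The
# dataframe must contain 'flux', 'nu-fission' and 'fission' scores.
# sp = openmc.StatePoint('statepoint.100.h5')
# df_d = sp.get_tally(name='mesh tally d').get_pandas_dataframe()
# flux, flux_err = flux_conv(df_d, sp_power=200, k=1.01523, kerr=0.00042)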
def beta_b(sp, case):
"""Returns Beta-effective and its uncertainty
Parameters
----------
sp: openmc.statepoint.StatePoint
this statepoint.h5 file is created by running the openmc
executable on the xml files generated by the build_xml.py
files and when tallies_on toggle is on True.
case: str
case number for naming files
Returns
-------
This function generates a csv file with beta-effective
and its uncertainty.
"""
name = 'analysis_output/p1a_' + case + '_b'
mesh_tally_b = sp.get_tally(name='mesh tally b')
df_b = mesh_tally_b.get_pandas_dataframe()
beta = df_b['mean'][0] / df_b['mean'][1]
beta_err = beta * np.sqrt((df_b['std. dev.'][0] / df_b['mean'][0])
** 2 + (df_b['std. dev.'][1] / df_b['mean'][1])**2)
df_bb = pd.DataFrame()
df_bb['beta'] = [beta]
df_bb['beta err'] = [beta_err]
df_bb.to_csv(name + '.csv')
return
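# Minimal sketch of the uncertainty propagation used in beta_b, with made-up
# tally means and standard deviations (purely illustrative numbers):
def _example_beta_uncertainty(num=6.5e-3, num_sd=1.0e-4, den=1.0, den_sd=2.0e-3):
    """Ratio num/den with relative errors added in quadrature."""
    beta = num / den
    beta_err = beta * np.sqrt((num_sd / num) ** 2 + (den_sd / den) ** 2)
    return beta, beta_err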
def reactivity_coefficient_b(
keff_og,
keff_og_unc,
keff_new,
keff_new_unc,
temp_change):
"""Generates the reactivity coefficient and its uncertainty
Parameters
----------
keff_og: float
original keff
keff_og_unc: float
original keff's uncertainty
keff_new: float
keff after temperature change
keff_new_unc: float
keff's uncertainty after temperature change
temp_change: float
temperature change (be sure to include +/- sign)
Returns
-------
coeff: float
reactivity coefficient
coeff_unc: float
reactivity coefficient uncertainty
"""
coeff = (keff_new * 1e5 - keff_og * 1e5) / temp_change
coeff_unc = np.sqrt((keff_og_unc * 1e5)**2 +
(keff_new_unc * 1e5)**2) / temp_change
return coeff, coeff_unc
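# Worked example with made-up keff values (illustration only): a change from
# 1.02000 +/- 0.00050 to 1.01850 +/- 0.00050 over +50 K gives approximately
# -3.0 +/- 1.4 pcm/K.
def _example_reactivity_coefficient():
    """Illustrative call to reactivity_coefficient_b (values are assumptions)."""
    coeff, coeff_unc = reactivity_coefficient_b(
        1.02000, 0.00050, 1.01850, 0.00050, 50.0)
    return coeff, coeff_unc  # -> approximately (-3.0, 1.41)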
def fission_density_c(sp, case):
"""Generates a csv and png file with results of fission source
distribution by 1/5 stripes for phase 1a-c of the benchmark
in the analysis_output folder.
Parameters
----------
sp: openmc.statepoint.StatePoint
this statepoint.h5 file is created by running the openmc
executable on the xml files generated by the build_xml.py
files and when tallies_on toggle is on True.
case: str
case number for naming files
Returns
-------
This function generates a csv file with fission density results
and visualization of fission source distribution by 1/5 stripe
for phase 1a-c of the benchmark.
"""
name = 'analysis_output/' + case + '_c'
region = ['1', '2', '3', '4', '5']
fission_rates = []
num = 1
for x in range(1, 13):
mesh_tally = sp.get_tally(name='mesh tally c' + str(x))
a = mesh_tally.get_pandas_dataframe()
a = a.drop(columns=['mesh ' + str(x), 'nuclide', 'score'])
if (x % 2) == 0:
num = int(x / 2)
stripe = [str(num) + 'B'] * 5
else:
num = int((x + 1) / 2)
stripe = [str(num) + 'T'] * 5
a['Region'] = region
a['Stripe'] = stripe
if x == 1:
df = a
else:
df = df.append(a)
b = a['mean'].to_numpy()
fission_rates.append(b)
df = df.set_index(['Stripe', 'Region'])
ave = df['mean'].mean()
df['Fission Density'] = df['mean'] / ave
ave_sd = 1 / (len(df['mean'])) * np.sqrt(np.sum(df['std. dev.']**2))
df['FD std dev'] = df['Fission Density'] * \
np.sqrt((df['std. dev.'] / df['mean'])**2 + (ave_sd / ave)**2)
df['Relative unc.'] = df['FD std dev'] / df['Fission Density']
df.to_csv(name + '.csv')
xs = []
ys = []
ws = np.array([F_len / 5] * 60)
hs = np.array([F_width] * 60)
fission_rates /= np.mean(fission_rates)
vs = fission_rates.flatten()
for p in range(6):
for f in range(2):
x_trans = p * T['A1']['P']['x']
y_trans = p * T['A1']['P']['y']
if f == 1:
x_trans += T['A1']['F']['x']
y_trans += T['A1']['F']['y']
for s in range(5):
if s > 0:
x_trans += F_len / 5
xs.append(V['A1']['F']['L']['x'] + x_trans)
ys.append(V['A1']['F']['B']['y'] + y_trans)
normal = pl.Normalize(vs.min(), vs.max())
colors = pl.cm.YlOrRd(normal(vs))
fig, ax = plt.subplots()
for x, y, w, h, c in zip(xs, ys, ws, hs, colors):
rect = pl.Rectangle((x, y), w, h, color=c)
ax.add_patch(rect)
cax, _ = cbar.make_axes(ax)
cb2 = cbar.ColorbarBase(cax, cmap=pl.cm.YlOrRd, norm=normal)
ax.set_xlim(-25, 12)
ax.set_ylim(-25, 0)
ax.set_xlabel('x [cm]')
ax.set_ylabel('y [cm]')
ax.set_title('Case ' + case + ': Normalized Fission Source')
pl.savefig(name, bbox_inches='tight')
return
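# Minimal numeric illustration of the normalization used above: each region's
# fission rate is divided by the mean over all regions, so a fission density of
# 1.10 means 10% more fissions than the assembly average (rates are made up).
def _example_fission_density():
    rates = np.array([1.1e-3, 0.9e-3, 1.0e-3])
    return rates / rates.mean()  # -> approximately array([1.1, 0.9, 1.0])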
def neutron_flux_d(sp, k, kerr, case):
"""Generates a csv file with results of neutron flux
averaged over the whole model, tabulated in 3 coarse energy groups
(upper energy boundaries 3 eV for thermal group and 0.1 MeV for
intermediate group) in the analysis_output folder.
Parameters
----------
sp: openmc.statepoint.StatePoint
this statepoint.h5 file is created by running the openmc
executable on the xml files generated by the build_xml.py
files and when tallies_on toggle is on True.
k: float
k-effective
kerr: float
k-effective uncertainty
case: str
case number for naming files
Returns
-------
This function generates a csv file with neutron flux results
"""
name = 'analysis_output/' + case + '_d'
mesh_tally_d = sp.get_tally(name='mesh tally d')
df_d = mesh_tally_d.get_pandas_dataframe()
df_dd = pd.DataFrame(index=['E3', 'E2', 'E1'])
df_dd['flux'], df_dd['flux_err'] = flux_conv(df_d, 200, k, kerr)
df_dd['relative_err_p'] = df_dd['flux_err'] / df_dd['flux']
df_dd = df_dd.reindex(['E1', 'E2', 'E3'])
df_dd.to_csv(name + '.csv')
return
def neutron_flux_e(sp, k, case):
"""Generates a csv file and png files with results of neutron flux
at 10000 points in model, tabulated in 3 coarse energy groups
(upper energy boundaries 3 eV for thermal group and 0.1 MeV for
intermediate group)
Parameters
----------
sp: openmc.statepoint.StatePoint
this statepoint.h5 file is created by running the openmc
executable on the xml files generated by the build_xml.py
files and when tallies_on toggle is on True.
k: float
k-effective
case: str
case number for naming files
Returns
-------
This function generates a csv file with neutron flux results
at 10000 points in the model for 3 energy groups, and 3 png
files visualizing the neutron flux distribution for 3
energy groups.
"""
name = 'analysis_output/' + case + '_e'
mesh_tally_e = sp.get_tally(name='mesh tally e')
flux = mesh_tally_e.get_slice(scores=['flux'])
nu_fission = mesh_tally_e.get_slice(scores=['nu-fission'])
fission = mesh_tally_e.get_slice(scores=['fission'])
flux_conv = {}
eg_names = ['eg3', 'eg2', 'eg1']
egs = [(1e-5, 3), (3, 0.1e6), (0.1e6, 20e6)]
P = 245486.6796001383
Q = 200 * 1.6022e-13
V = 3 * np.sqrt(3) / 2 * H_side ** 2 * z_thickness * T_pitch / (100 * 100)
for x in range(3):
flux_eg = flux.get_slice(
filters=[
openmc.EnergyFilter], filter_bins=[
(egs[x],)])
nu_fiss_eg = nu_fission.get_slice(
filters=[
openmc.EnergyFilter], filter_bins=[
(egs[x],)])
fiss_eg = fission.get_slice(
filters=[
openmc.EnergyFilter], filter_bins=[
(egs[x],)])
nu = nu_fiss_eg.mean / fiss_eg.mean
nu = np.nanmean(nu)
N = P * nu / (Q * k)
flux_conv[eg_names[x]] = flux_eg.mean * 1 / V * N
flux_conv[eg_names[x]].shape = (100, 100)
flux_conv[eg_names[x]][np.isnan(flux_conv[eg_names[x]])] = 0
plt.figure()
plt.imshow(
flux_conv['eg1'] /
np.mean(
flux_conv['eg1']),
interpolation='none',
origin='lower',
cmap='viridis')
plt.colorbar()
plt.title('Case ' + case + ' Energy Group 1 Flux Distribution')
plt.savefig(name + '_eg1')
np.savetxt(name + "_eg1.csv", np.flip(flux_conv['eg1'], 0), delimiter=",")
plt.figure()
plt.imshow(
flux_conv['eg2'] /
np.mean(
flux_conv['eg2']),
interpolation='none',
origin='lower',
cmap='viridis')
plt.colorbar()
plt.title('Case ' + case + ' Energy Group 2 Flux Distribution')
plt.savefig(name + '_eg2')
np.savetxt(name + "_eg2.csv", np.flip(flux_conv['eg2'], 0), delimiter=",")
plt.figure()
plt.imshow(
flux_conv['eg3'] /
np.mean(
flux_conv['eg3']),
interpolation='none',
origin='lower',
cmap='viridis')
plt.colorbar()
plt.title('Case ' + case + ' Energy Group 3 Flux Distribution')
plt.savefig(name + '_eg3')
np.savetxt(name + "_eg3.csv", np.flip(flux_conv['eg3'], 0), delimiter=",")
return
def neutron_spectrum_f(sp, case, k, kerr):
"""Generates a csv file and png file with results of neutron
spectrum averaged over the fuel assembly.
Parameters
----------
sp: openmc.statepoint.StatePoint
this statepoint.h5 file is created by running the openmc
executable on the xml files generated by the build_xml.py
files and when tallies_on toggle is on True.
case: str
case number for naming files
k: float
k-effective
kerr: float
k-effective uncertainty
Returns
-------
This function generates a csv and png file with results of neutron
spectrum averaged over the fuel assembly.
"""
name = 'analysis_output/' + case + '_f'
mesh_tally_f = sp.get_tally(name='mesh tally f')
df_f = mesh_tally_f.get_pandas_dataframe()
index_list = []
for x in range(252):
index_list += ['E' + str(x + 1)]
df_ff = pd.DataFrame(index=index_list)
import matplotlib.image as mpimg
import matplotlib.style as style
import matplotlib.pyplot as plt
from matplotlib import rcParams
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
import seaborn as sns
from math import exp
import pandas as pd
import mdtraj as md
import pickle as pk
import numpy as np
import statistics
import itertools
import fileinput
import fnmatch
import shutil
import random
import math
import os
import re
def fix_cap_remove_ace(pdb_file):
"""
Removes the H atoms of the capped ACE residue.
"""
remove_words = [
"H1 ACE",
"H2 ACE",
"H3 ACE",
"H31 ACE",
"H32 ACE",
"H33 ACE",
]
with open(pdb_file) as oldfile, open("intermediate.pdb", "w") as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
command = "rm -rf " + pdb_file
os.system(command)
command = "mv intermediate.pdb " + pdb_file
os.system(command)
def fix_cap_replace_ace(pdb_file):
"""
Replaces the alpha carbon atom of the
capped ACE residue with a standard name.
"""
fin = open(pdb_file, "rt")
data = fin.read()
data = data.replace("CA ACE", "CH3 ACE")
data = data.replace("C ACE", "CH3 ACE")
fin.close()
fin = open(pdb_file, "wt")
fin.write(data)
fin.close()
def fix_cap_remove_nme(pdb_file):
"""
Removes the H atoms of the capped NME residue.
"""
remove_words = [
"H1 NME",
"H2 NME",
"H3 NME",
"H31 NME",
"H32 NME",
"H33 NME",
]
with open(pdb_file) as oldfile, open("intermediate.pdb", "w") as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
command = "rm -rf " + pdb_file
os.system(command)
command = "mv intermediate.pdb " + pdb_file
os.system(command)
def fix_cap_replace_nme(pdb_file):
"""
Replaces the alpha carbon atom of the
capped NME residue with a standard name.
"""
fin = open(pdb_file, "rt")
data = fin.read()
data = data.replace("CA NME", "CH3 NME")
data = data.replace("C NME", "CH3 NME")
fin.close()
fin = open(pdb_file, "wt")
fin.write(data)
fin.close()
def prepare_alanine_dipeptide():
"""
Prepares the alanine dipeptide system for Gaussian
Accelerated Molecular Dynamics (GaMD) simulations.
Downloads the pdb structure from
https://markovmodel.github.io/mdshare/ALA2/ and
parameterizes it using General Amber Force Field
(GAFF).
"""
os.system(
"curl -O http://ftp.imp.fu-berlin.de/pub/cmb-data/alanine-dipeptide-nowater.pdb"
)
os.system(
"rm -rf system_inputs"
) # Removes any existing directory named system_inputs
os.system("mkdir system_inputs") # Creates a directory named system_inputs
cwd = os.getcwd()
target_dir = cwd + "/" + "system_inputs"
os.system("pdb4amber -i alanine-dipeptide-nowater.pdb -o intermediate.pdb")
# Delete HH31, HH32 and HH33 from the ACE residue (tleap adds them later)
remove_words = ["HH31 ACE", "HH32 ACE", "HH33 ACE"]
with open("intermediate.pdb") as oldfile, open(
"system.pdb", "w"
) as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
os.system("rm -rf intermediate*")
# save the tleap script to file
with open("input_TIP3P.leap", "w") as f:
f.write(
"""
source leaprc.protein.ff14SB
source leaprc.water.tip3p
set default FlexibleWater on
set default PBRadii mbondi2
pdb = loadpdb system.pdb
solvateBox pdb TIP3PBOX 15
saveamberparm pdb system_TIP3P.prmtop system_TIP3P.inpcrd
saveamberparm pdb system_TIP3P.parm7 system_TIP3P.rst7
savepdb pdb system_TIP3P.pdb
quit
"""
)
os.system("tleap -f input_TIP3P.leap")
os.system("rm -rf leap.log")
shutil.copy(
cwd + "/" + "system_TIP3P.inpcrd",
target_dir + "/" + "system_TIP3P.inpcrd",
)
shutil.copy(
cwd + "/" + "system_TIP3P.parm7",
target_dir + "/" + "system_TIP3P.parm7",
)
shutil.copy(
cwd + "/" + "system_TIP3P.pdb", target_dir + "/" + "system_TIP3P.pdb"
)
shutil.copy(
cwd + "/" + "system_TIP3P.prmtop",
target_dir + "/" + "system_TIP3P.prmtop",
)
shutil.copy(
cwd + "/" + "system_TIP3P.rst7", target_dir + "/" + "system_TIP3P.rst7"
)
shutil.copy(cwd + "/" + "system.pdb", target_dir + "/" + "system.pdb")
shutil.copy(
cwd + "/" + "alanine-dipeptide-nowater.pdb",
target_dir + "/" + "alanine-dipeptide-nowater.pdb",
)
shutil.copy(
cwd + "/" + "input_TIP3P.leap", target_dir + "/" + "input_TIP3P.leap"
)
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.parm7")
os.system("rm -rf system_TIP3P.pdb")
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.rst7")
os.system("rm -rf system_TIP3P.prmtop")
os.system("rm -rf system.pdb")
os.system("rm -rf input_TIP3P.leap")
os.system("rm -rf alanine-dipeptide-nowater.pdb")
def create_vectors(x):
"""
Extracts periodic box information from the
given line.
"""
x = str(x)
x = x.replace("Vec3", "")
x = re.findall(r"\d*\.?\d+", x)
for i in range(0, len(x)):
x[i] = float(x[i])
x = tuple(x)
n = int(len(x) / 3)
x = [x[i * n : (i + 1) * n] for i in range((len(x) + n - 1) // n)]
return x
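# Example of the parsing performed by create_vectors; the string below mimics
# the repr of OpenMM periodic box vectors (note that signs and units are
# discarded, which is acceptable for the rectangular boxes used here):
def _example_create_vectors():
    """Illustrative call with an assumed box-vector string."""
    box = "(Vec3(x=2.5, y=0.0, z=0.0), Vec3(x=0.0, y=2.5, z=0.0), Vec3(x=0.0, y=0.0, z=2.5)) nm"
    return create_vectors(box)  # -> [(2.5, 0.0, 0.0), (0.0, 2.5, 0.0), (0.0, 0.0, 2.5)]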
def simulated_annealing(
parm="system_TIP3P.prmtop",
rst="system_TIP3P.inpcrd",
annealing_output_pdb="system_annealing_output.pdb",
annealing_steps=100000,
pdb_freq=100000,
starting_temp=0,
target_temp=300,
temp_incr=3,
):
"""
Performs simulated annealing of the system from
0K to 300 K (default) using OpenMM MD engine and
saves the last frame of the simulation to be
accessed by the next simulation.
Parameters
----------
parm: str
System's topology file
rst: str
System's coordinate file
annealing_output_pdb: str
System's output trajectory file
annealing_steps: int
Annealing steps at each temperature jump
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
starting_temp: int
Initial temperature of Simulated Annealing
target_temp: int
Final temperature of Simulated Annealing
temp_incr: int
Temperature increase at every step
"""
prmtop = AmberPrmtopFile(parm)
inpcrd = AmberInpcrdFile(rst)
annealing_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
annealing_integrator = LangevinIntegrator(
0 * kelvin, 1 / picosecond, 2 * femtoseconds
)
total_steps = ((target_temp / temp_incr) + 1) * annealing_steps
annealing_temp_range = int((target_temp / temp_incr) + 1)
annealing_platform = Platform.getPlatformByName("CUDA")
annealing_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
annealing_simulation = Simulation(
prmtop.topology,
annealing_system,
annealing_integrator,
annealing_platform,
annealing_properties,
)
annealing_simulation.context.setPositions(inpcrd.positions)
if inpcrd.boxVectors is not None:
annealing_simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)
annealing_simulation.minimizeEnergy()
annealing_simulation.reporters.append(
PDBReporter(annealing_output_pdb, pdb_freq)
)
simulated_annealing_last_frame = (
annealing_output_pdb[:-4] + "_last_frame.pdb"
)
annealing_simulation.reporters.append(
PDBReporter(simulated_annealing_last_frame, total_steps)
)
annealing_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=total_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
temp = starting_temp
while temp <= target_temp:
annealing_integrator.setTemperature(temp * kelvin)
if temp == starting_temp:
annealing_simulation.step(annealing_steps)
annealing_simulation.saveState("annealing.state")
else:
annealing_simulation.loadState("annealing.state")
annealing_simulation.step(annealing_steps)
temp += temp_incr
state = annealing_simulation.context.getState()
print(state.getPeriodicBoxVectors())
annealing_simulation_box_vectors = state.getPeriodicBoxVectors()
print(annealing_simulation_box_vectors)
with open("annealing_simulation_box_vectors.pkl", "wb") as f:
pk.dump(annealing_simulation_box_vectors, f)
print("Finshed NVT Simulated Annealing Simulation")
def npt_equilibration(
parm="system_TIP3P.prmtop",
npt_output_pdb="system_npt_output.pdb",
pdb_freq=500000,
npt_steps=5000000,
target_temp=300,
npt_pdb="system_annealing_output_last_frame.pdb",
):
"""
Performs NPT equilibration MD of the system
using OpenMM MD engine and saves the last
frame of the simulation to be accessed by
the next simulation.
Parameters
----------
parm: str
System's topology file
npt_output_pdb: str
System's output trajectory file
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
npt_steps: int
NPT simulation steps
target_temp: int
Temperature for MD simulation
npt_pdb: str
Last frame of the simulation
"""
npt_init_pdb = PDBFile(npt_pdb)
prmtop = AmberPrmtopFile(parm)
npt_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
barostat = MonteCarloBarostat(25.0 * bar, target_temp * kelvin, 25)
npt_system.addForce(barostat)
npt_integrator = LangevinIntegrator(
target_temp * kelvin, 1 / picosecond, 2 * femtoseconds
)
npt_platform = Platform.getPlatformByName("CUDA")
npt_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
npt_simulation = Simulation(
prmtop.topology,
npt_system,
npt_integrator,
npt_platform,
npt_properties,
)
npt_simulation.context.setPositions(npt_init_pdb.positions)
npt_simulation.context.setVelocitiesToTemperature(target_temp * kelvin)
with open("annealing_simulation_box_vectors.pkl", "rb") as f:
annealing_simulation_box_vectors = pk.load(f)
annealing_simulation_box_vectors = create_vectors(
annealing_simulation_box_vectors
)
npt_simulation.context.setPeriodicBoxVectors(
annealing_simulation_box_vectors[0],
annealing_simulation_box_vectors[1],
annealing_simulation_box_vectors[2],
)
npt_last_frame = npt_output_pdb[:-4] + "_last_frame.pdb"
npt_simulation.reporters.append(PDBReporter(npt_output_pdb, pdb_freq))
npt_simulation.reporters.append(PDBReporter(npt_last_frame, npt_steps))
npt_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=npt_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
npt_simulation.minimizeEnergy()
npt_simulation.step(npt_steps)
npt_simulation.saveState("npt_simulation.state")
state = npt_simulation.context.getState()
print(state.getPeriodicBoxVectors())
npt_simulation_box_vectors = state.getPeriodicBoxVectors()
print(npt_simulation_box_vectors)
with open("npt_simulation_box_vectors.pkl", "wb") as f:
pk.dump(npt_simulation_box_vectors, f)
print("Finished NPT Simulation")
def nvt_equilibration(
parm="system_TIP3P.prmtop",
nvt_output_pdb="system_nvt_output.pdb",
pdb_freq=500000,
nvt_steps=5000000,
target_temp=300,
nvt_pdb="system_npt_output_last_frame.pdb",
):
"""
Performs NVT equilibration MD of the system
using OpenMM MD engine and saves the last
frame of the simulation to be accessed by
the next simulation.
Parameters
----------
parm: str
System's topology file
nvt_output_pdb: str
System's output trajectory file
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
nvt_steps: int
NVT simulation steps
target_temp: int
Temperature for MD simulation
nvt_pdb: str
Last frame of the simulation
"""
nvt_init_pdb = PDBFile(nvt_pdb)
prmtop = AmberPrmtopFile(parm)
nvt_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
nvt_integrator = LangevinIntegrator(
target_temp * kelvin, 1 / picosecond, 2 * femtoseconds
)
nvt_platform = Platform.getPlatformByName("CUDA")
nvt_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
nvt_simulation = Simulation(
prmtop.topology,
nvt_system,
nvt_integrator,
nvt_platform,
nvt_properties,
)
nvt_simulation.context.setPositions(nvt_init_pdb.positions)
nvt_simulation.context.setVelocitiesToTemperature(target_temp * kelvin)
with open("npt_simulation_box_vectors.pkl", "rb") as f:
npt_simulation_box_vectors = pk.load(f)
npt_simulation_box_vectors = create_vectors(npt_simulation_box_vectors)
nvt_simulation.context.setPeriodicBoxVectors(
npt_simulation_box_vectors[0],
npt_simulation_box_vectors[1],
npt_simulation_box_vectors[2],
)
nvt_last_frame = nvt_output_pdb[:-4] + "_last_frame.pdb"
nvt_simulation.reporters.append(PDBReporter(nvt_output_pdb, pdb_freq))
nvt_simulation.reporters.append(PDBReporter(nvt_last_frame, nvt_steps))
nvt_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=nvt_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
nvt_simulation.minimizeEnergy()
nvt_simulation.step(nvt_steps)
nvt_simulation.saveState("nvt_simulation.state")
state = nvt_simulation.context.getState()
print(state.getPeriodicBoxVectors())
nvt_simulation_box_vectors = state.getPeriodicBoxVectors()
print(nvt_simulation_box_vectors)
with open("nvt_simulation_box_vectors.pkl", "wb") as f:
pk.dump(nvt_simulation_box_vectors, f)
print("Finished NVT Simulation")
def run_equilibration():
"""
Runs systematic simulated annealing followed by
NPT and NVT equilibration MD simulation.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "equilibration"
os.system("rm -rf equilibration")
os.system("mkdir equilibration")
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.inpcrd",
target_dir + "/" + "system_TIP3P.inpcrd",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.parm7",
target_dir + "/" + "system_TIP3P.parm7",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.pdb",
target_dir + "/" + "system_TIP3P.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.prmtop",
target_dir + "/" + "system_TIP3P.prmtop",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.rst7",
target_dir + "/" + "system_TIP3P.rst7",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system.pdb",
target_dir + "/" + "system.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "alanine-dipeptide-nowater.pdb",
target_dir + "/" + "alanine-dipeptide-nowater.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "input_TIP3P.leap",
target_dir + "/" + "input_TIP3P.leap",
)
os.chdir(target_dir)
simulated_annealing()
npt_equilibration()
nvt_equilibration()
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.parm7")
os.system("rm -rf system_TIP3P.pdb")
os.system("rm -rf system_TIP3P.rst7")
os.system("rm -rf system_TIP3P.prmtop")
os.system("rm -rf system.pdb")
os.system("rm -rf alanine-dipeptide-nowater.pdb")
os.system("rm -rf input_TIP3P.leap")
os.chdir(cwd)
def create_starting_structures():
"""
Prepares starting structures for Amber GaMD simulations.
All input files required to run Amber GaMD simulations are
placed in the starting_structures directory.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
os.system("rm -rf starting_structures")
os.system("mkdir starting_structures")
shutil.copy(
cwd + "/" + "equilibration" + "/" + "system_nvt_output_last_frame.pdb",
target_dir + "/" + "system_nvt_output_last_frame.pdb",
)
os.chdir(target_dir)
fix_cap_remove_nme("system_nvt_output_last_frame.pdb")
fix_cap_replace_nme("system_nvt_output_last_frame.pdb")
# Save the tleap script to file
with open("final_input_TIP3P.leap", "w") as f:
f.write(
"""
source leaprc.protein.ff14SB
source leaprc.water.tip3p
set default FlexibleWater on
set default PBRadii mbondi2
pdb = loadpdb system_nvt_output_last_frame.pdb
saveamberparm pdb system_final.prmtop system_final.inpcrd
saveamberparm pdb system_final.parm7 system_final.rst7
savepdb pdb system_final.pdb
quit
"""
)
os.system("tleap -f final_input_TIP3P.leap")
os.system("rm -rf leap.log")
os.system("rm -rf system_nvt_output_last_frame.pdb")
os.chdir(cwd)
def add_vec_inpcrd():
"""
Adds box dimensions captured from the last saved
frame of the NVT simulations to the inpcrd file.
Only to be used when the box dimensions are not
present in the inpcrd file.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
shutil.copy(
cwd + "/" + "equilibration" + "/" + "nvt_simulation_box_vectors.pkl",
target_dir + "/" + "nvt_simulation_box_vectors.pkl",
)
os.chdir(target_dir)
with open("nvt_simulation_box_vectors.pkl", "rb") as f:
nvt_simulation_box_vectors = pk.load(f)
nvt_simulation_box_vectors = create_vectors(nvt_simulation_box_vectors)
vectors = (
(nvt_simulation_box_vectors[0][0]) * 10,
(nvt_simulation_box_vectors[1][1]) * 10,
(nvt_simulation_box_vectors[2][2]) * 10,
)
vectors = (
round(vectors[0], 7),
round(vectors[1], 7),
round(vectors[2], 7),
)
last_line = (
" "
+ str(vectors[0])
+ " "
+ str(vectors[1])
+ " "
+ str(vectors[2])
+ " 90.0000000"
+ " 90.0000000"
+ " 90.0000000"
)
with open("system_final.inpcrd", "a+") as f:
f.write(last_line)
os.system("rm -rf nvt_simulation_box_vectors.pkl")
os.chdir(cwd)
def add_vec_prmtop():
"""
Adds box dimensions captured from the last saved
frame of the NVT simulations to the prmtop file.
Only to be used when the box dimensions are not
present in the prmtop file.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
shutil.copy(
cwd + "/" + "equilibration" + "/" + "nvt_simulation_box_vectors.pkl",
target_dir + "/" + "nvt_simulation_box_vectors.pkl",
)
os.chdir(target_dir)
with open("nvt_simulation_box_vectors.pkl", "rb") as f:
nvt_simulation_box_vectors = pk.load(f)
nvt_simulation_box_vectors = create_vectors(nvt_simulation_box_vectors)
vectors = (
nvt_simulation_box_vectors[0][0],
nvt_simulation_box_vectors[1][1],
nvt_simulation_box_vectors[2][2],
)
vectors = round(vectors[0], 7), round(vectors[1], 7), round(vectors[2], 7)
oldbeta = "9.00000000E+01"
x = str(vectors[0]) + str(0) + "E+" + "01"
y = str(vectors[1]) + str(0) + "E+" + "01"
z = str(vectors[2]) + str(0) + "E+" + "01"
line1 = "%FLAG BOX_DIMENSIONS"
line2 = "%FORMAT(5E16.8)"
line3 = " " + oldbeta + " " + x + " " + y + " " + z
with open("system_final.prmtop") as i, open(
"system_intermediate_final.prmtop", "w"
) as f:
for line in i:
if line.startswith("%FLAG RADIUS_SET"):
line = line1 + "\n" + line2 + "\n" + line3 + "\n" + line
f.write(line)
os.system("rm -rf system_final.prmtop")
os.system("mv system_intermediate_final.prmtop system_final.prmtop")
os.system("rm -rf nvt_simulation_box_vectors.pkl")
os.chdir(cwd)
def create_filetree(
nst_lim=26000000,
ntw_x=1000,
nt_cmd=1000000,
n_teb=1000000,
n_tave=50000,
ntcmd_prep=200000,
nteb_prep=200000,
):
"""
Creates a directory named gamd_simulations. Inside
this directory, there are subdirectories for dihedral,
dual and total potential-boosted GaMD with upper and
lower threshold boosts separately.
Parameters
----------
nst_lim: int
Total number of simulation steps, including the preparatory run.
For example, if nst_lim = 26000000, then we may have
2 ns of preparatory simulation i.e. 1000000 preparation steps
and 50 ns of GaMD simulation i.e. 25000000 simulation steps
ntw_x: int
Saving coordinates of the simulation every ntw_x
timesteps. For example, 2 ps implies 1000 timesteps
nt_cmd: int
Number of initial conventional MD simulation steps; 2 ns of
preparatory simulation requires 1000000 preparation
timesteps
n_teb: int
Number of biasing MD simulation steps
n_tave: int
Number of simulation steps used to calculate the
average and standard deviation of potential energies
ntcmd_prep: int
Number of preparation conventional molecular dynamics
steps. This is used for system equilibration;
potential energies are not collected for statistics
nteb_prep: int
Number of preparation biasing molecular dynamics
simulation steps. This is used for system
equilibration
"""
cwd = os.getcwd()
os.system("rm -rf gamd_simulations")
os.system("mkdir gamd_simulations")
os.chdir(cwd + "/" + "gamd_simulations")
source_dir = cwd + "/" + "starting_structures"
target_dir = cwd + "/" + "gamd_simulations"
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
for i in range(len(dir_list)):
os.mkdir(dir_list[i])
os.chdir(target_dir + "/" + dir_list[i])
shutil.copy(
source_dir + "/" + "system_final.inpcrd",
target_dir + "/" + dir_list[i] + "/" + "system_final.inpcrd",
)
shutil.copy(
source_dir + "/" + "system_final.prmtop",
target_dir + "/" + dir_list[i] + "/" + "system_final.prmtop",
)
if "lower" in dir_list[i]:
i_E = 1
if "upper" in dir_list[i]:
i_E = 2
if "total" in dir_list[i]:
i_gamd = 1
if "dihedral" in dir_list[i]:
i_gamd = 2
if "dual" in dir_list[i]:
i_gamd = 3
with open("md.in", "w") as f:
f.write("&cntrl" + "\n")
f.write(" imin = 0, irest = 0, ntx = 1," + "\n")
f.write(" nstlim = " + str(nst_lim) + ", dt = 0.002," + "\n")
f.write(" ntc = 2, ntf = 2, tol = 0.000001," + "\n")
f.write(" iwrap = 1, ntb = 1, cut = 8.0," + "\n")
f.write(" ntt = 3, temp0 = 300.0, gamma_ln = 1.0, " + "\n")
f.write(
" ntpr = 500, ntwx = " + str(ntw_x) + ", ntwr = 500," + "\n"
)
f.write(" ntxo = 2, ioutfm = 1, ig = -1, ntwprt = 0," + "\n")
f.write(
" igamd = "
+ str(i_gamd)
+ ", iE = "
+ str(i_E)
+ ", irest_gamd = 0,"
+ "\n"
)
f.write(
" ntcmd = "
+ str(nt_cmd)
+ ", nteb = "
+ str(n_teb)
+ ", ntave = "
+ str(n_tave)
+ ","
+ "\n"
)
f.write(
" ntcmdprep = "
+ str(ntcmd_prep)
+ ", ntebprep = "
+ str(nteb_prep)
+ ","
+ "\n"
)
f.write(" sigma0D = 6.0, sigma0P = 6.0" + " \n")
f.write("&end" + "\n")
os.chdir(target_dir)
os.chdir(cwd)
def run_simulations():
"""
Runs GaMD simulations for each of the dihedral, dual and total
potential boosts for both thresholds i.e. upper and lower potential
thresholds. (Remember to check md.in files for further details and
flag information).
"""
cwd = os.getcwd()
os.chdir(cwd + "/" + "gamd_simulations")
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dihedral_threshold_lower")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dihedral_threshold_upper")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dual_threshold_lower")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dual_threshold_upper")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "total_threshold_lower")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "total_threshold_upper")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations")
os.chdir(cwd)
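# Minimal end-to-end driver sketch, assuming AmberTools (tleap, pdb4amber),
# pmemd.cuda and a CUDA-capable GPU are available; it simply chains the helpers
# defined above in their intended order.
# prepare_alanine_dipeptide()
# run_equilibration()
# create_starting_structures()
# add_vec_inpcrd()
# add_vec_prmtop()
# create_filetree()
# run_simulations()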
def create_data_files(
jump=10,
traj="system_final.nc",
topology="system_final.prmtop",
T=300,
):
"""
Extracts data from GaMD log files and saves them as
weights.dat, Psi.dat and Phi_Psi.dat. gamd.log file
contains data excluding the initial equilibration MD
simulation steps, while the trajectory output file has all
the frames including the initial equilibration
MD steps. This is taken care of below to keep the
data consistent.
Parameters
----------
jump: int
Every nth frame to be considered for reweighting
traj: str
System's trajectory file
topology: str
System's topology file
T: int
MD simulation temperature
"""
# To make data consistent with gamd.log and .nc file
factor = 0.001987 * T
with open("md.in") as f:
lines = f.readlines()
for i in lines:
if "nstlim =" in i:
nstlim_line = i
if "ntcmd =" in i:
ntcmd_line = i
if "ntwx =" in i:
ntwx_line = i
x = re.findall(r"\b\d+\b", ntcmd_line)
ntcmd = int(x[0])
x = re.findall(r"\b\d+\b", nstlim_line)
nstlim = int(x[0])
x = re.findall(r"\b\d+\b", ntwx_line)
ntwx = int(x[1])
# From the .nc trajectory files, we will not consider ntcmd trajectories
leave_frames = int(ntcmd / ntwx)
no_frames = int(nstlim / ntwx)
# Recheck conditions
file = open("gamd.log", "r")
number_of_lines = 0
for line in file:
line = line.strip("\n")
number_of_lines += 1
file.close()
f = open("gamd.log")
fourth_line = f.readlines()[3]
if str(ntcmd) in fourth_line:
datapoints = number_of_lines - 4
if not str(ntcmd) in fourth_line:
datapoints = number_of_lines - 3
print(datapoints == int((nstlim - ntcmd) / ntwx))
# Creating Psi.dat and Phi_Psi.dat
traj = md.load(traj, top=topology)
traj = traj[leave_frames:no_frames:jump]
phi = md.compute_phi(traj)
phi = phi[1] # 0:indices, 1:phi angles
phi = np.array([math.degrees(i) for i in phi]) # radians to degrees
psi = md.compute_psi(traj)
psi = psi[1] # 0:indices, 1:psi angles
psi = np.array([math.degrees(i) for i in psi]) # radians to degrees
df_psi = pd.DataFrame(phi, columns=["Psi"])
df_psi = df_psi.tail(int(datapoints))
df_psi.to_csv("Psi.dat", sep="\t", index=False, header=False)
df_phi = pd.DataFrame(psi, columns=["Phi"])
df_phi = df_phi.tail(int(datapoints))
df_phi_psi = pd.concat([df_phi, df_psi], axis=1)
df_phi_psi.to_csv("Phi_Psi.dat", sep="\t", index=False, header=False)
# Creating weights.dat
with open("gamd.log") as f:
lines = f.readlines()
column_names = lines[2]
column_names = column_names.replace("#", "")
column_names = column_names.replace("\n", "")
column_names = column_names.replace(" ", "")
column_names = column_names.split(",")
list_words = ["#"]
with open("gamd.log") as oldfile, open("data.log", "w") as newfile:
for line in oldfile:
if not any(word in line for word in list_words):
newfile.write(line)
df = pd.read_csv("data.log", delim_whitespace=True, header=None)
df.columns = column_names
df["dV(kcal/mol)"] = (
df["Boost-Energy-Potential"] + df["Boost-Energy-Dihedral"]
)
df["dV(kbT)"] = df["dV(kcal/mol)"] / factor
df_ = df[["dV(kbT)", "total_nstep", "dV(kcal/mol)"]]
df_ = df_[::jump]
df_.to_csv("weights.dat", sep="\t", index=False, header=False)
os.system("rm -rf data.log")
print(df_phi_psi.shape)
print(df_phi.shape)
print(df_.shape)
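# Hypothetical usage sketch, run from inside one of the gamd_simulations
# subdirectories where md.in, gamd.log and system_final.nc live (arguments
# shown are the defaults):
# create_data_files(jump=10, traj="system_final.nc",
#                   topology="system_final.prmtop", T=300)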
def create_bins(lower_bound, width, upper_bound):
"""
Creates bins given the lower and upper bounds
and the bin width.
"""
bins = []
for low in range(lower_bound, upper_bound, width):
bins.append([low, low + width])
return bins
def find_bin(value, bins):
"""
Returns the index of the bin containing the given value, or -1 if the value falls outside all bins.
"""
for i in range(0, len(bins)):
if bins[i][0] <= value < bins[i][1]:
return i
return -1
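# Quick illustration of the two binning helpers above: 10-degree bins covering
# the full dihedral range give 36 bins, -175 degrees falls in the first bin and
# out-of-range values map to -1.
def _example_binning():
    bins = create_bins(lower_bound=-180, width=10, upper_bound=180)
    return len(bins), find_bin(-175, bins), find_bin(185, bins)  # -> (36, 0, -1)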
def reweight_1d(
binspace=10, n_structures=4, Xdim=[-180, 180], T=300.0, min_prob=0.000001
):
"""
Reweights boosted potential energies in one-dimension based on
Maclaurin series expansion to one, two and three degrees.
Parameters
----------
binspace: int
Spacing between the bins
n_structures: int
Number of structures per bin chosen
for Weighted Ensemble (WE) simulations
Xdim: list
Range of dihedral angles
T: float
MD simulation temperature
min_prob: float
minimum probability threshold
"""
beta = 1.0 / (0.001987 * float(T))
df_Psi = pd.read_csv("Psi.dat", delim_whitespace=True, header=None)
df_Psi.columns = ["Psi"]
df_weight = pd.read_csv("weights.dat", delim_whitespace=True, header=None)
df_weight.columns = ["dV_kBT", "timestep", "dVkcalmol"]
sum_total = df_Psi.shape[0]
binsX = np.arange(float(Xdim[0]), (float(Xdim[1]) + binspace), binspace)
hist, hist_edges = np.histogram(df_Psi[["Psi"]], bins=binsX, weights=None)
pstarA = [i / sum_total for i in list(hist)]
bins = create_bins(
lower_bound=int(Xdim[0]), width=binspace, upper_bound=int(Xdim[1])
)
data = df_Psi["Psi"].values.tolist()
binned_weights = []
for value in data:
bin_index = find_bin(value, bins)
binned_weights.append(bin_index)
df_index = pd.DataFrame(binned_weights)
df_index.columns = ["index"]
df = pd.concat([df_index, df_Psi, df_weight], axis=1)
dV_c1 = []
dV_c2 = []
dV_c3 = []
dV = []
for i in range(len(bins)):
df_i = df.loc[(df["index"] == i)]
dV_list = df_i["dVkcalmol"].values.tolist()
if len(dV_list) >= 10:
dV_c1.append(statistics.mean(dV_list))
dV_c2.append(
statistics.mean([i ** 2 for i in dV_list])
- (statistics.mean(dV_list)) ** 2
)
dV_c3.append(
statistics.mean([i ** 3 for i in dV_list])
- 3
* (statistics.mean([i ** 2 for i in dV_list]))
* (statistics.mean(dV_list))
+ 2 * (statistics.mean(dV_list)) ** 3
)
if len(dV_list) < 10:
dV_c1.append(0)
dV_c2.append(0)
dV_c3.append(0)
dV.append(dV_list)
c1 = [i * beta for i in dV_c1]
c2 = [i * ((beta ** 2) / 2) for i in dV_c2]
c3 = [i * ((beta ** 3) / 6) for i in dV_c3]
c1 = c1
c12 = [a + b for a, b in zip(c1, c2)]
c123 = [a + b for a, b in zip(c12, c3)]
for i in range(len(c1)):
if c1[i] >= 700:
c1[i] = 700
for i in range(len(c12)):
if c12[i] >= 700:
c12[i] = 700
for i in range(len(c123)):
if c123[i] >= 700:
c123[i] = 700
ensemble_average_c1 = [exp(i) for i in c1]
ensemble_average_c12 = [exp(i) for i in c12]
ensemble_average_c123 = [exp(i) for i in c123]
numerator_c1 = [a * b for a, b in zip(pstarA, ensemble_average_c1)]
numerator_c12 = [a * b for a, b in zip(pstarA, ensemble_average_c12)]
numerator_c123 = [a * b for a, b in zip(pstarA, ensemble_average_c123)]
#### c1
denominatorc1 = []
for i in range(len(bins)):
product_c1 = pstarA[i] * ensemble_average_c1[i]
denominatorc1.append(product_c1)
denominator_c1 = sum(denominatorc1)
pA_c1 = [i / denominator_c1 for i in numerator_c1]
#### c12
denominatorc12 = []
for i in range(len(bins)):
product_c12 = pstarA[i] * ensemble_average_c12[i]
denominatorc12.append(product_c12)
denominator_c12 = sum(denominatorc12)
pA_c12 = [i / denominator_c12 for i in numerator_c12]
#### c123
denominatorc123 = []
for i in range(len(bins)):
product_c123 = pstarA[i] * ensemble_average_c123[i]
denominatorc123.append(product_c123)
denominator_c123 = sum(denominatorc123)
pA_c123 = [i / denominator_c123 for i in numerator_c123]
data_c1 = list(zip(bins, pA_c1))
data_c12 = list(zip(bins, pA_c12))
data_c123 = list(zip(bins, pA_c123))
df_c1 = pd.DataFrame(data_c1, columns=["bins", "pA_c1"])
df_c12 = pd.DataFrame(data_c12, columns=["bins", "pA_c12"])
df_c123 = pd.DataFrame(data_c123, columns=["bins", "pA_c123"])
####c1
df_c1.to_csv("c1_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c1_1d.txt", "r") as f1, open("pA_c1_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_1d.txt")
####c12
df_c12.to_csv("c12_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c12_1d.txt", "r") as f1, open("pA_c12_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_1d.txt")
####c123
df_c123.to_csv("c123_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c123_1d.txt", "r") as f1, open("pA_c123_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_1d.txt")
####c1_arranged
df_c1_arranged = df_c1.sort_values(by="pA_c1", ascending=False)
df_c1_arranged = df_c1_arranged[df_c1_arranged.pA_c1 > min_prob]
df_c1_arranged.to_csv(
"c1_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_arranged_1d.txt", "r") as f1, open(
"pA_c1_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_arranged_1d.txt")
####c12_arranged
df_c12_arranged = df_c12.sort_values(by="pA_c12", ascending=False)
df_c12_arranged = df_c12_arranged[df_c12_arranged.pA_c12 > min_prob]
df_c12_arranged.to_csv(
"c12_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_arranged_1d.txt", "r") as f1, open(
"pA_c12_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_arranged_1d.txt")
####c123_arranged
df_c123_arranged = df_c123.sort_values(by="pA_c123", ascending=False)
df_c123_arranged = df_c123_arranged[df_c123_arranged.pA_c123 > min_prob]
df_c123_arranged.to_csv(
"c123_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_arranged_1d.txt", "r") as f1, open(
"pA_c123_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_arranged_1d.txt")
####c1_arranged
df_c1_arranged["index"] = df_c1_arranged.index
index_list_c1 = df_c1_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c1 = []
index_indces_c1 = []
for i in index_list_c1:
df_index_list_c1 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c1 = df_index_list_c1["frame_index"].tolist()
frame_indices_c1.append(frame_c1)
index_c1 = [i] * len(frame_c1)
index_indces_c1.append(index_c1)
frame_indices_c1 = [item for elem in frame_indices_c1 for item in elem]
index_indces_c1 = [item for elem in index_indces_c1 for item in elem]
df_c1_frame = pd.DataFrame(frame_indices_c1, columns=["frame_index"])
df_c1_index = pd.DataFrame(index_indces_c1, columns=["index"])
df_c1_frame_index = pd.concat([df_c1_frame, df_c1_index], axis=1)
df_c1_frame_index = df_c1_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c1_frame_index.to_csv(
"c1_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_frame_index_1d.txt", "r") as f1, open(
"c1_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_frame_index_1d.txt")
####c12_arranged
df_c12_arranged["index"] = df_c12_arranged.index
index_list_c12 = df_c12_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c12 = []
index_indces_c12 = []
for i in index_list_c12:
df_index_list_c12 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c12 = df_index_list_c12["frame_index"].tolist()
frame_indices_c12.append(frame_c12)
index_c12 = [i] * len(frame_c12)
index_indces_c12.append(index_c12)
frame_indices_c12 = [item for elem in frame_indices_c12 for item in elem]
index_indces_c12 = [item for elem in index_indces_c12 for item in elem]
df_c12_frame = pd.DataFrame(frame_indices_c12, columns=["frame_index"])
df_c12_index = pd.DataFrame(index_indces_c12, columns=["index"])
df_c12_frame_index = pd.concat([df_c12_frame, df_c12_index], axis=1)
df_c12_frame_index = df_c12_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c12_frame_index.to_csv(
"c12_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_frame_index_1d.txt", "r") as f1, open(
"c12_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_frame_index_1d.txt")
####c123_arranged
df_c123_arranged["index"] = df_c123_arranged.index
index_list_c123 = df_c123_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c123 = []
index_indces_c123 = []
for i in index_list_c123:
df_index_list_c123 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c123 = df_index_list_c123["frame_index"].tolist()
frame_indices_c123.append(frame_c123)
index_c123 = [i] * len(frame_c123)
index_indces_c123.append(index_c123)
frame_indices_c123 = [item for elem in frame_indices_c123 for item in elem]
index_indces_c123 = [item for elem in index_indces_c123 for item in elem]
df_c123_frame = pd.DataFrame(frame_indices_c123, columns=["frame_index"])
df_c123_index = pd.DataFrame(index_indces_c123, columns=["index"])
df_c123_frame_index = pd.concat([df_c123_frame, df_c123_index], axis=1)
df_c123_frame_index = df_c123_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c123_frame_index.to_csv(
"c123_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_frame_index_1d.txt", "r") as f1, open(
"c123_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_frame_index_1d.txt")
####c1
indices_c1_1d = df_c1_frame_index["index"].unique()
frames_c1 = []
for i in indices_c1_1d:
x = df_c1_frame_index.loc[df_c1_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c1.append(z)
frames_c1_1d = [item for elem in frames_c1 for item in elem]
with open("frames_c1_1d.pickle", "wb") as f:
pk.dump(frames_c1_1d, f)
with open("indices_c1_1d.pickle", "wb") as f:
pk.dump(indices_c1_1d, f)
####c12
indices_c12_1d = df_c12_frame_index["index"].unique()
frames_c12 = []
for i in indices_c12_1d:
x = df_c12_frame_index.loc[df_c12_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c12.append(z)
frames_c12_1d = [item for elem in frames_c12 for item in elem]
with open("frames_c12_1d.pickle", "wb") as f:
pk.dump(frames_c12_1d, f)
with open("indices_c12_1d.pickle", "wb") as f:
pk.dump(indices_c12_1d, f)
####c123
indices_c123_1d = df_c123_frame_index["index"].unique()
frames_c123 = []
for i in indices_c123_1d:
x = df_c123_frame_index.loc[df_c123_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c123.append(z)
frames_c123_1d = [item for elem in frames_c123 for item in elem]
with open("frames_c123_1d.pickle", "wb") as f:
pk.dump(frames_c123_1d, f)
with open("indices_c123_1d.pickle", "wb") as f:
pk.dump(indices_c123_1d, f)
##saving probabilities for each selected frame
####c1
prob_c1_1d_list = []
for i in indices_c1_1d:
prob_c1_1d_list.append(df_c1["pA_c1"][i])
prob_c1_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c1_1d_list
)
)
prob_c1_1d_list = [x / n_structures for x in prob_c1_1d_list]
with open("prob_c1_1d_list.pickle", "wb") as f:
pk.dump(prob_c1_1d_list, f)
####c12
prob_c12_1d_list = []
for i in indices_c12_1d:
prob_c12_1d_list.append(df_c12["pA_c12"][i])
prob_c12_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c12_1d_list
)
)
prob_c12_1d_list = [x / n_structures for x in prob_c12_1d_list]
with open("prob_c12_1d_list.pickle", "wb") as f:
pk.dump(prob_c12_1d_list, f)
####c123
prob_c123_1d_list = []
for i in indices_c123_1d:
prob_c123_1d_list.append(df_c123["pA_c123"][i])
prob_c123_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c123_1d_list
)
)
prob_c123_1d_list = [x / n_structures for x in prob_c123_1d_list]
with open("prob_c123_1d_list.pickle", "wb") as f:
pk.dump(prob_c123_1d_list, f)
ref_df_1d = pd.DataFrame(bins, columns=["dim0", "dim1"])
ref_df_1d["bins"] = ref_df_1d.agg(
lambda x: f"[{x['dim0']} , {x['dim1']}]", axis=1
)
ref_df_1d = ref_df_1d[["bins"]]
index_ref_1d = []
for i in range(len(bins)):
index_ref_1d.append(i)
index_ref_df_1d = pd.DataFrame(index_ref_1d, columns=["index"])
df_ref_1d = pd.concat([ref_df_1d, index_ref_df_1d], axis=1)
df_ref_1d.to_csv("ref_1d.txt", header=True, index=None, sep=" ", mode="w")
df.to_csv("df_1d.csv", index=False)
os.system("rm -rf __pycache__")
print("Successfully Completed Reweighing")
def reweight_2d(
binspace=10,
n_structures=4,
Xdim=[-180, 180],
Ydim=[-180, 180],
T=300.0,
min_prob=0.000001,
):
"""
Reweights boosted potential energies in two-dimensions
based on Maclaurin series expansion to one, two and
three degrees.
Parameters
----------
binspace: int
Spacing between the bins
n_structures: int
Number of structures per bin chosen
for Weighted Ensemble (WE) simulations
Xdim: list
Range of dihedral angles (1st dimension)
Ydim: list
Range of dihedral angles (2nd dimension)
T: float
MD simulation temperature
min_prob: float
minimum probability threshold
"""
beta = 1.0 / (0.001987 * float(T))
df_Phi_Psi = pd.read_csv("Phi_Psi.dat", delim_whitespace=True, header=None)
df_Phi_Psi.columns = ["Phi", "Psi"]
df_weight = pd.read_csv("weights.dat", delim_whitespace=True, header=None)
df_weight.columns = ["dV_kBT", "timestep", "dVkcalmol"]
sum_total = df_Phi_Psi.shape[0]
binsX = np.arange(float(Xdim[0]), (float(Xdim[1]) + binspace), binspace)
binsY = np.arange(float(Ydim[0]), (float(Ydim[1]) + binspace), binspace)
hist2D, hist_edgesX, hist_edgesY = np.histogram2d(
df_Phi_Psi["Phi"].values.tolist(),
df_Phi_Psi["Psi"].values.tolist(),
bins=(binsX, binsY),
weights=None,
)
pstarA_2D = [i / sum_total for i in list(hist2D)]
bins_tuple_X = create_bins(
lower_bound=int(Xdim[0]), width=binspace, upper_bound=int(Xdim[1])
)
bins_tuple_Y = create_bins(
lower_bound=int(Ydim[0]), width=binspace, upper_bound=int(Ydim[1])
)
bins = []
for i in range(len(bins_tuple_X)):
for j in range(len(bins_tuple_Y)):
bins.append([bins_tuple_X[i], bins_tuple_Y[j]])
pstarA = [item for elem in pstarA_2D for item in elem]
hist = [item for elem in hist2D for item in elem]
hist = [int(i) for i in hist]
data_X = df_Phi_Psi["Phi"].values.tolist()
binned_weights_X = []
for value in data_X:
bin_index_X = find_bin(value, bins_tuple_X)
binned_weights_X.append(bin_index_X)
data_Y = df_Phi_Psi["Psi"].values.tolist()
binned_weights_Y = []
for value in data_Y:
bin_index_Y = find_bin(value, bins_tuple_Y)
binned_weights_Y.append(bin_index_Y)
binned_weights_2D = []
for i in range(len(binned_weights_X)):
binned_weights_2D.append([binned_weights_X[i], binned_weights_Y[i]])
binned_weights = []
for i in range(len(binned_weights_2D)):
binned_weights.append(
(binned_weights_2D[i][0] * len(bins_tuple_Y))
+ (binned_weights_2D[i][1] + 1)
)
df_index = pd.DataFrame(binned_weights)
df_index.columns = ["index"]
df_index["index"] = df_index["index"] - 1
df = pd.concat([df_index, df_Phi_Psi, df_weight], axis=1)
dV_c1 = []
dV_c2 = []
dV_c3 = []
dV = []
for i in range(len(bins)):
df_i = df.loc[(df["index"] == i)]
dV_list = df_i["dVkcalmol"].values.tolist()
if len(dV_list) >= 10:
dV_c1.append(statistics.mean(dV_list))
dV_c2.append(
statistics.mean([i ** 2 for i in dV_list])
- (statistics.mean(dV_list)) ** 2
)
dV_c3.append(
statistics.mean([i ** 3 for i in dV_list])
- 3
* (statistics.mean([i ** 2 for i in dV_list]))
* (statistics.mean(dV_list))
+ 2 * (statistics.mean(dV_list)) ** 3
)
if len(dV_list) < 10:
dV_c1.append(0)
dV_c2.append(0)
dV_c3.append(0)
dV.append(dV_list)
c1 = [i * beta for i in dV_c1]
c2 = [i * ((beta ** 2) / 2) for i in dV_c2]
c3 = [i * ((beta ** 3) / 6) for i in dV_c3]
c1 = c1
c12 = [a + b for a, b in zip(c1, c2)]
c123 = [a + b for a, b in zip(c12, c3)]
for i in range(len(c1)):
if c1[i] >= 700:
c1[i] = 700
for i in range(len(c12)):
if c12[i] >= 700:
c12[i] = 700
for i in range(len(c123)):
if c123[i] >= 700:
c123[i] = 700
ensemble_average_c1 = [exp(i) for i in c1]
ensemble_average_c12 = [exp(i) for i in c12]
ensemble_average_c123 = [exp(i) for i in c123]
numerator_c1 = [a * b for a, b in zip(pstarA, ensemble_average_c1)]
numerator_c12 = [a * b for a, b in zip(pstarA, ensemble_average_c12)]
numerator_c123 = [a * b for a, b in zip(pstarA, ensemble_average_c123)]
#### c1
denominatorc1 = []
for i in range(len(bins)):
product_c1 = pstarA[i] * ensemble_average_c1[i]
denominatorc1.append(product_c1)
denominator_c1 = sum(denominatorc1)
pA_c1 = [i / denominator_c1 for i in numerator_c1]
#### c12
denominatorc12 = []
for i in range(len(bins)):
product_c12 = pstarA[i] * ensemble_average_c12[i]
denominatorc12.append(product_c12)
denominator_c12 = sum(denominatorc12)
pA_c12 = [i / denominator_c12 for i in numerator_c12]
#### c123
denominatorc123 = []
for i in range(len(bins)):
product_c123 = pstarA[i] * ensemble_average_c123[i]
denominatorc123.append(product_c123)
denominator_c123 = sum(denominatorc123)
pA_c123 = [i / denominator_c123 for i in numerator_c123]
data_c1 = list(zip(bins, pA_c1))
data_c12 = list(zip(bins, pA_c12))
data_c123 = list(zip(bins, pA_c123))
df_c1 = pd.DataFrame(data_c1, columns=["bins", "pA_c1"])
df_c12 = pd.DataFrame(data_c12, columns=["bins", "pA_c12"])
df_c123 = pd.DataFrame(data_c123, columns=["bins", "pA_c123"])
df_c1.to_csv("c1_2d.txt", header=True, index=None, sep=" ", mode="w")
with open("c1_2d.txt", "r") as f1, open("pA_c1_2d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_2d.txt")
####c12
df_c12.to_csv("c12_2d.txt", header=True, index=None, sep=" ", mode="w")
with open("c12_2d.txt", "r") as f1, open("pA_c12_2d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_2d.txt")
####c123
df_c123.to_csv("c123_2d.txt", header=True, index=None, sep=" ", mode="w")
with open("c123_2d.txt", "r") as f1, open("pA_c123_2d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_2d.txt")
####c1_arranged
df_c1_arranged = df_c1.sort_values(by="pA_c1", ascending=False)
df_c1_arranged = df_c1_arranged[df_c1_arranged.pA_c1 > min_prob]
df_c1_arranged.to_csv(
"c1_arranged_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_arranged_2d.txt", "r") as f1, open(
"pA_c1_arranged_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_arranged_2d.txt")
####c12_arranged
df_c12_arranged = df_c12.sort_values(by="pA_c12", ascending=False)
df_c12_arranged = df_c12_arranged[df_c12_arranged.pA_c12 > min_prob]
df_c12_arranged.to_csv(
"c12_arranged_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_arranged_2d.txt", "r") as f1, open(
"pA_c12_arranged_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_arranged_2d.txt")
####c123_arranged
df_c123_arranged = df_c123.sort_values(by="pA_c123", ascending=False)
df_c123_arranged = df_c123_arranged[df_c123_arranged.pA_c123 > min_prob]
df_c123_arranged.to_csv(
"c123_arranged_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_arranged_2d.txt", "r") as f1, open(
"pA_c123_arranged_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_arranged_2d.txt")
####c1_arranged
df_c1_arranged["index"] = df_c1_arranged.index
index_list_c1 = df_c1_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c1 = []
index_indces_c1 = []
for i in index_list_c1:
df_index_list_c1 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c1 = df_index_list_c1["frame_index"].tolist()
frame_indices_c1.append(frame_c1)
index_c1 = [i] * len(frame_c1)
index_indces_c1.append(index_c1)
frame_indices_c1 = [item for elem in frame_indices_c1 for item in elem]
index_indces_c1 = [item for elem in index_indces_c1 for item in elem]
df_c1_frame = pd.DataFrame(frame_indices_c1, columns=["frame_index"])
df_c1_index = pd.DataFrame(index_indces_c1, columns=["index"])
df_c1_frame_index = pd.concat([df_c1_frame, df_c1_index], axis=1)
df_c1_frame_index = df_c1_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c1_frame_index.to_csv(
"c1_frame_index_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_frame_index_2d.txt", "r") as f1, open(
"c1_frame_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_frame_index_2d.txt")
####c12_arranged
df_c12_arranged["index"] = df_c12_arranged.index
index_list_c12 = df_c12_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c12 = []
index_indces_c12 = []
for i in index_list_c12:
df_index_list_c12 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c12 = df_index_list_c12["frame_index"].tolist()
frame_indices_c12.append(frame_c12)
index_c12 = [i] * len(frame_c12)
index_indces_c12.append(index_c12)
frame_indices_c12 = [item for elem in frame_indices_c12 for item in elem]
index_indces_c12 = [item for elem in index_indces_c12 for item in elem]
df_c12_frame = pd.DataFrame(frame_indices_c12, columns=["frame_index"])
df_c12_index = pd.DataFrame(index_indces_c12, columns=["index"])
df_c12_frame_index = pd.concat([df_c12_frame, df_c12_index], axis=1)
df_c12_frame_index = df_c12_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c12_frame_index.to_csv(
"c12_frame_index_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_frame_index_2d.txt", "r") as f1, open(
"c12_frame_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_frame_index_2d.txt")
####c123_arranged
df_c123_arranged["index"] = df_c123_arranged.index
df_c123_arranged["index"] = df_c123_arranged.index
index_list_c123 = df_c123_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c123 = []
index_indces_c123 = []
for i in index_list_c123:
df_index_list_c123 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c123 = df_index_list_c123["frame_index"].tolist()
frame_indices_c123.append(frame_c123)
index_c123 = [i] * len(frame_c123)
index_indces_c123.append(index_c123)
frame_indices_c123 = [item for elem in frame_indices_c123 for item in elem]
index_indces_c123 = [item for elem in index_indces_c123 for item in elem]
df_c123_frame = pd.DataFrame(frame_indices_c123, columns=["frame_index"])
df_c123_index = pd.DataFrame(index_indces_c123, columns=["index"])
df_c123_frame_index = pd.concat([df_c123_frame, df_c123_index], axis=1)
df_c123_frame_index = df_c123_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c123_frame_index.to_csv(
"c123_frame_index_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_frame_index_2d.txt", "r") as f1, open(
"c123_frame_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_frame_index_2d.txt")
####c1
indices_c1_2d = df_c1_frame_index["index"].unique()
frames_c1 = []
for i in indices_c1_2d:
x = df_c1_frame_index.loc[df_c1_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c1.append(z)
frames_c1_2d = [item for elem in frames_c1 for item in elem]
with open("frames_c1_2d.pickle", "wb") as f:
pk.dump(frames_c1_2d, f)
with open("indices_c1_2d.pickle", "wb") as f:
pk.dump(indices_c1_2d, f)
####c12
indices_c12_2d = df_c12_frame_index["index"].unique()
frames_c12 = []
for i in indices_c12_2d:
x = df_c12_frame_index.loc[df_c12_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c12.append(z)
frames_c12_2d = [item for elem in frames_c12 for item in elem]
with open("frames_c12_2d.pickle", "wb") as f:
pk.dump(frames_c12_2d, f)
with open("indices_c12_2d.pickle", "wb") as f:
pk.dump(indices_c12_2d, f)
####c123
indices_c123_2d = df_c123_frame_index["index"].unique()
frames_c123 = []
for i in indices_c123_2d:
x = df_c123_frame_index.loc[df_c123_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c123.append(z)
frames_c123_2d = [item for elem in frames_c123 for item in elem]
with open("frames_c123_2d.pickle", "wb") as f:
pk.dump(frames_c123_2d, f)
with open("indices_c123_2d.pickle", "wb") as f:
pk.dump(indices_c123_2d, f)
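## Hedged sketch (added for illustration; not part of the original workflow):
## the c1/c12/c123 blocks above repeat the same pattern -- for every retained
## index, draw n_structures frame indices at random. A generic helper such as
## the one below could replace them; its name and signature are assumptions,
## and it relies on the pandas/random imports already used above.
def _sample_frames_per_index(df_frame_index, n_structures):
    indices = df_frame_index["index"].unique()
    frames = []
    for i in indices:
        frame_ids = df_frame_index.loc[df_frame_index["index"] == i, "frame_index"].tolist()
        # sample without replacement, as random.sample does above
        frames.extend(random.sample(frame_ids, n_structures))
    return indices, frames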
##saving probabilities for each selected frame
####c1
prob_c1_2d_list = []
for i in indices_c1_2d:
prob_c1_2d_list.append(df_c1["pA_c1"][i])
prob_c1_2d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c1_2d_list
)
)
prob_c1_2d_list = [x / n_structures for x in prob_c1_2d_list]
with open("prob_c1_2d_list.pickle", "wb") as f:
pk.dump(prob_c1_2d_list, f)
####c12
prob_c12_2d_list = []
for i in indices_c12_2d:
prob_c12_2d_list.append(df_c12["pA_c12"][i])
prob_c12_2d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c12_2d_list
)
)
prob_c12_2d_list = [x / n_structures for x in prob_c12_2d_list]
with open("prob_c12_2d_list.pickle", "wb") as f:
pk.dump(prob_c12_2d_list, f)
####c123
prob_c123_2d_list = []
for i in indices_c123_2d:
prob_c123_2d_list.append(df_c123["pA_c123"][i])
prob_c123_2d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c123_2d_list
)
)
prob_c123_2d_list = [x / n_structures for x in prob_c123_2d_list]
with open("prob_c123_2d_list.pickle", "wb") as f:
pk.dump(prob_c123_2d_list, f)
ref_df_2d = | pd.DataFrame(bins, columns=["binsX", "binsY"]) | pandas.DataFrame |
from wordcloud import WordCloud, STOPWORDS
from PIL import Image
import os
import seaborn as sns
import csv
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import matplotlib.dates as mdates
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib as mpl
from Support.Additional import get_grey_colour
from Support.LoadData import make_timely
from mpl_toolkits.basemap import Basemap
from matplotlib.colors import Normalize
plt.style.use('seaborn-ticks')
plt.rcParams["font.family"] = "Helvetica"
mpl.rcParams.update(mpl.rcParamsDefault)
def wordcloud_figure(abstract_count, output_file):
""" Make the double helix word cloud: just a bit of fun."""
words_array = []
with open(abstract_count, 'r', errors='replace') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['word'].lower() not in STOPWORDS:
if len(row['word']) > 3:
words_array.append(
(row['word'].upper(), float(row['num_words'])))
mask = Image.new('RGBA', (8000, 4467))
icon = Image.open(os.path.abspath(
os.path.join('__file__', '../..', 'Data', 'Support',
'doublehelix_mask.png'))).convert('RGBA')
mask.paste(icon, icon)
mask = np.array(mask)
wc = WordCloud(background_color='white', max_words=1250, mask=mask,
max_font_size=5000)
wc.generate_from_frequencies(dict(words_array))
wc.recolor(color_func=get_grey_colour)
wc.to_file(output_file)
plt.figure(figsize=(16, 8))
plt.imshow(wc, interpolation='bilinear')
plt.axis('off')
plt.show()
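# Hedged usage sketch (illustrative only; the file names are assumptions).
# The input CSV must contain 'word' and 'num_words' columns, as read by the
# DictReader above:
#   wordcloud_figure('abstract_word_counts.csv', 'double_helix_wordcloud.png')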
def gwas_growth(output_file, Cat_Studies, Cat_Ancestry,
Cat_Ancestry_groupedbyN):
""" Plot the growth of GWAS over time (Figure 1)"""
plt.style.use('seaborn-ticks')
plt.rcParams['font.family'] = 'Helvetica'
plt.rcParams['axes.linewidth'] = 0.75
yearlist = []
yearquarterlist = []
for year in range(2007, 2018):
yearlist.append(str(year))
for quarter in ['Q1', 'Q2', 'Q3', 'Q4']:
yearquarterlist.append(str(year) + quarter)
variables = ['N ≤ 5,000', '5,001 ≤ N ≤ 50,000', '50,001 ≤ N ≤ 100,000',
'100,001 ≤ N', 'N', 'Associations', 'Journals Printing GWAS',
'# Diseases Studied']
df_years, df_quarters = make_timely(variables,
yearlist,
yearquarterlist,
Cat_Studies,
Cat_Ancestry,
Cat_Ancestry_groupedbyN)
plt.figure(figsize=(15, 10))
axA = plt.subplot(2, 1, 1)
ax0variables = ['N ≤ 5,000', '5,001 ≤ N ≤ 50,000',
'50,001 ≤ N ≤ 100,000', '100,001 ≤ N']
ax0 = df_quarters[ax0variables].plot(kind='bar', stacked=True, ax=axA,
color=['#e41a1c', '#377eb8',
'#4daf4a', '#ff7f00'],
alpha=0.6, edgecolor='k')
sns.despine(top=True, right=True, ax=ax0)
ax0.set_ylabel('Number of Study Accessions', fontsize=12)
ax0.tick_params(labelsize=10)
ax0.legend(fontsize=12, loc='upper left')
axB = plt.subplot(2, 2, 3)
ax1a = df_years[['Associations']].plot(ax=axB, color='#e41a1c', alpha=0.75,
rot=90, marker='o', linewidth=1.5,
markersize=8,
label='Associations Discovered',
markeredgecolor='k',
markeredgewidth=0.5)
ax1b = axB.twinx()
ax1b.plot(df_years[['N']], color='#377eb8', marker='s', markersize=7,
linewidth=1.5, markeredgecolor='k', markeredgewidth=0.5)
ax1a.set_ylabel('Number of Associations Discovered', fontsize=12)
ax1b.set_ylabel('Number of Study Participants Analyzed', fontsize=12)
ax1b.grid(False)
axB.plot(0, 0, '-r', color='#377eb8', marker='s', markersize=7,
markeredgecolor='k', markeredgewidth=0.5)
axB.legend(['Associations (left)', 'Participants (right)'],
fontsize=12, loc='upper left')
ax1a.tick_params(labelsize=10)
ax1b.tick_params(labelsize=10)
plt.axis('tight')
axC = plt.subplot(2, 2, 4)
axtest = axC.twinx()
ax_2a = df_years[['Journals Printing GWAS']].plot(kind='bar',
ax=axC,
position=1,
color='#377eb8',
legend=False,
width=0.35,
alpha=0.75,
edgecolor='k')
ax_2b = df_years[['# Diseases Studied']].plot(kind='bar',
ax=axtest,
position=0,
color='#ff7f00',
width=0.35,
legend=False,
alpha=0.75,
edgecolor='k')
ax_2a.set_ylabel('Unique Number of Journals Publishing GWAS', fontsize=12)
ax_2b.set_ylabel('Unique Number of Diseases Studied', fontsize=12)
ax_2b.grid(False)
axC.plot(np.nan, '#377eb8', linewidth=4)
axC.plot(np.nan, '#ff7f00', linewidth=4)
axC.legend(['Journals (left)', 'Diseases (right)'],
fontsize=12, loc='upper left')
ax_2a.margins(1, 0.5)
ax_2a.tick_params(labelsize=10)
ax_2b.tick_params(labelsize=10)
plt.axis('tight')
plt.tick_params(axis='both', which='minor', labelsize=10)
plt.tight_layout()
plt.savefig(output_file, bbox_inches='tight')
def choropleth_map(df, input_series, cmap, output_path):
""" fairly generic function to make a choropleth map
feed in either 'N' or 'ParticipationPerPerson': Population adjusted is
just for robustness"""
cm = plt.get_cmap(cmap)
df['scheme'] = [
cm(df[input_series][i] / df[input_series].max()) for i in df.index]
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111, facecolor='#deeff5', frame_on=False)
m = Basemap(lon_0=0, projection='robin', resolution='i')
m.drawmapboundary(color='k', linewidth=0.075)
m.drawcountries(color='k', linewidth=0.025)
m.drawmeridians(np.arange(0, 360, 60), labels=[False, False, False, True],
color='#bdbdbd', dashes=[6, 6], linewidth=0.1, fontsize=10)
m.drawparallels(np.arange(-90, 90, 30), labels=[True, False, False, False],
color='#bdbdbd', dashes=[6, 6], linewidth=0.1, fontsize=10)
m.readshapefile(os.path.abspath(os.path.join('__file__',
'../..',
'data',
'ShapeFiles',
'ne_10m_admin_0_countries')),
'units', color='#444444', linewidth=.075)
for info, shape in zip(m.units_info, m.units):
country = info['NAME_CIAWF']
if country not in df.index:
color = 'w'
else:
color = df.loc[country]['scheme']
patches = [Polygon(np.array(shape), True)]
pc = PatchCollection(patches)
pc.set_facecolor(color)
ax.add_collection(pc)
norm = Normalize()
mapper = mpl.cm.ScalarMappable(norm=norm, cmap=cm)
if input_series == 'N':
mapper.set_array(df[input_series] / 1000000)
else:
mapper.set_array(df[input_series])
clb = plt.colorbar(mapper, shrink=0.75)
clb.ax.tick_params(labelsize=10)
if input_series == 'N':
clb.ax.set_title('Number of\n People (m)', y=1.02, fontsize=11)
elif input_series == 'Per Rec':
clb.ax.set_title('Participations\nPer Recruitment',
y=1.02, fontsize=11)
plt.savefig(output_path, bbox_inches='tight')
plt.show()
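# Hedged usage sketch (added for illustration; the values and file name are
# assumptions). The frame must be indexed by the country names used in the
# shapefile's NAME_CIAWF field and contain the series to be plotted.
def _demo_choropleth_map():
    df = pd.DataFrame({'N': [120_000_000, 45_000_000, 3_000_000]},
                      index=['United States', 'United Kingdom', 'Finland'])
    choropleth_map(df, 'N', 'Blues', 'participants_choropleth.png')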
def plot_heatmap(funder_ancestry, funder_parent, output_path):
''' Build the funder heatmaps '''
sns.set(font_scale=1, font='Arial', style='white')
f, (ax1, ax2) = plt.subplots(nrows=1,
ncols=2,
sharex=False,
sharey=True,
figsize=(18, 11),
gridspec_kw={'width_ratios': [.25, .7],
'wspace': .05, 'hspace': 0})
gg = sns.heatmap(funder_ancestry.astype(float),
ax=ax1,
fmt='.0f',
annot=True,
cmap='Oranges',
xticklabels=True,
yticklabels=True,
linewidth=0.1,
linecolor='k',
robust=True,
cbar=False,
annot_kws={'size': 10})
gg.tick_params(axis='both', which='major', labelsize=12)
gg.set_xlabel('Ancestry', fontsize=12)
hh = sns.heatmap(funder_parent.astype(float),
ax=ax2,
fmt='.0f',
annot=True,
cmap='Blues',
xticklabels=True,
yticklabels=True,
linewidth=0.1,
linecolor='k',
robust=True,
cbar=False,
annot_kws={'size': 11})
hh.tick_params(axis='both', which='major', labelsize=12)
hh.set_xlabel('Broad EFO Category', fontsize=12)
plt.gcf()
plt.setp(ax2.get_yticklabels(), visible=False)
plt.tight_layout(h_pad=5)
plt.savefig(output_path, bbox_inches='tight')
def plot_bubbles(output_path, Cat_Ancestry,
Broad_Ancestral_NoNR, countriesdict):
""" This makes the Broader Ancestry bubble plot (Figure 2) """
fig = plt.figure(figsize=(12, 6), dpi=800)
ax = fig.add_subplot(1, 1, 1)
for obs in Cat_Ancestry.index:
for key, value in countriesdict.items():
if Cat_Ancestry['Broader'][obs].strip() == key:
ax.plot_date(x=pd.to_datetime(Cat_Ancestry['Dates'][obs]),
y=Cat_Ancestry['N'][obs],
color=value,
marker='.',
label='the data',
alpha=0.4,
markersize=Cat_Ancestry['N'][obs] / 6500)
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=24))
ax.set_xlim(pd.Timestamp('2007-01-01'), | pd.Timestamp('2018-12-31') | pandas.Timestamp |
import os
import fnmatch
import numpy as np
import csv
import sys
import pandas as pd
import re
from sklearn import preprocessing
from scipy import signal
from scipy import stats
def readinputclustering(filename, preprocessingmode):
    df = pd.read_csv(filename, header=None)
    X = df.iloc[:, 1:].astype(float)
    X.fillna(0, inplace=True)
    labels = df.iloc[:, 0]
    if preprocessingmode == 'log':
        # log-transform the data because values differ by orders of magnitude
        X = np.log(X)
        X[~np.isfinite(X)] = 0
    else:
        min_max_scaler = preprocessing.MinMaxScaler()
        x_scaled = min_max_scaler.fit_transform(X)
        X = pd.DataFrame(x_scaled)
    return X, labels
# reads input for SAX time series discretization
def readMStimedomaintransient(filename):
MS = pd.read_csv(filename, sep =',')
return MS
def crawl_folders(path, extensions):
directories = []
for dirpath, dirnames, files in os.walk(path):
for directory in dirnames:
if directory != 'rawdata' and directory != 'spectrograms' and directory != 'spectrogrampics' and directory != 'results':
p = os.path.join(dirpath, directory)
directories.append(p)
return directories
# find files path, reads csv files only unless specified differently in extensions
def find_files(path, extensions):
# Allow both with ".csv" and without "csv" to be used for extensions
extensions = [e.replace(".", "") for e in extensions]
for dirpath, dirnames, files in os.walk(path):
for extension in extensions:
for f in fnmatch.filter(files, "*.%s" % extension):
p = os.path.join(dirpath, f)
yield (p, extension)
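# Hedged usage sketch (illustrative only; the path is an assumption):
#   for csv_path, ext in find_files('/data/spectrograms', ['csv']):
#       arr2D = read(csv_path)   # min-max scaled spectrogram, see read() below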
# Maybe specify a limit parameter so that, optionally, only part of the
# spectrogram is examined; for now the whole spectrogram is used.
# To compare m/z series, normalization within and between samples is necessary.
def read(filename):
spectrogram = pd.read_csv(filename, sep =',')
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(spectrogram)
arr2D = x_scaled
return arr2D
# read in the original freq/mass/intensity raw data from Basem
def readdataframe(filename):
sampleID = os.path.basename(filename)
originaldata = | pd.read_table(filename, sep=',', header=0) | pandas.read_table |
from torch.utils.data import DataLoader, Dataset
import cv2
import os
from utils import make_mask,mask2enc,make_mask_
import numpy as np
import pandas as pd
from albumentations import (HorizontalFlip, Normalize, Compose, Resize, RandomRotate90, Flip, RandomCrop, PadIfNeeded)
from albumentations.pytorch import ToTensor
from sklearn.model_selection import train_test_split,GroupKFold,KFold,GroupShuffleSplit
path = './input/'
RANDOM_STATE = 2019
class SteelDataset(Dataset):
def __init__(self, df, data_folder, mean, std, phase):
self.df = df
self.root = data_folder
self.mean = mean
self.std = std
self.phase = phase
self.transforms = get_transforms(phase, mean, std)
self.fnames = self.df.index.tolist()
def __getitem__(self, idx):
image_id, mask = make_mask(idx, self.df)
image_path = os.path.join(self.root, "train_images", image_id)
# img = Image.open(image_path)
# img = np.array(img)[:, :, 0]
img = cv2.imread(image_path)[:, :, 0]
img = img[:, :, np.newaxis]
augmented = self.transforms(image=img, mask=mask)
img = augmented['image']
mask = augmented['mask'] # 1x256x1600x4
mask = mask[0].permute(2, 0, 1) # 1x4x256x1600
return img, mask
def __len__(self):
return len(self.fnames)
class SteelDatasetCopped(Dataset):
def __init__(self, df, data_folder, mean= (0.41009), std= (0.16991), phase='train'):
self.df = df
self.root = data_folder
self.mean = (0.3959)
self.std = (0.1729)
self.phase = phase
self.transforms = get_transforms(phase, mean, std)
self.fnames = self.df.index.tolist()
def __getitem__(self, idx):
image_id, mask = make_mask_(idx, self.df)
# print(image_id)
image_path = os.path.join(self.root, "images", image_id)
try:
img = cv2.imread(image_path)[:, :, 0]
except Exception:
image_path = os.path.join(self.root, "images_n", image_id)
img = cv2.imread(image_path)[:, :, 0]
img = img[:, :, np.newaxis]
augmented = self.transforms(image=img, mask=mask)
img = augmented['image']
mask = augmented['mask'] # 1x256x1600x4
mask = mask[0].permute(2, 0, 1) # 1x4x256x1600
return img, mask
def __len__(self):
return len(self.fnames)
def get_transforms(phase, mean, std):
list_transforms = []
if phase == "train":
list_transforms.extend(
[
# PadIfNeeded(min_height=256, min_width=256),
# RandomCrop(height=256, width=256, p=1),
# RandomCrop(height=224, width=224, p=1),
HorizontalFlip(p=0.5), # only horizontal flip as of now
Flip(p=0.5),
# RandomRotate90(p=0.5),
# PadIfNeeded(min_height=256, min_width=256)
]
)
else:
list_transforms.extend(
[
PadIfNeeded(min_height=256, min_width=256),
]
)
list_transforms.extend(
[
Normalize(mean=mean, std=std, p=1),
ToTensor(),
]
)
list_trfms = Compose(list_transforms)
return list_trfms
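# Hedged usage sketch (illustrative only): the mean/std below mirror the
# grayscale values used elsewhere in this file and are otherwise assumptions.
#   train_tfms = get_transforms("train", mean=(0.3959,), std=(0.1729,))
#   augmented = train_tfms(image=img, mask=mask)  # img: HxWx1 uint8, mask: HxWx4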
def provider(
data_folder,
df_path,
phase,
mean=None,
std=None,
batch_size=4,
num_workers=4,
cropped=False
):
'''Returns dataloader for the model training'''
if cropped ==False:
df = pd.read_csv(df_path)
# some preprocessing
# https://www.kaggle.com/amanooo/defect-detection-starter-u-net
df['ImageId'], df['ClassId'] = zip(*df['ImageId_ClassId'].str.split('_'))
df['ClassId'] = df['ClassId'].astype(int)
df = df.pivot(index='ImageId', columns='ClassId', values='EncodedPixels')
df['defects'] = df.count(axis=1)
train_df, val_df = train_test_split(df, test_size=0.2, stratify=df["defects"], random_state=RANDOM_STATE)
df = train_df if phase == "train" else val_df
image_dataset = SteelDataset(df, data_folder, mean, std, phase)
dataloader = DataLoader(
image_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
shuffle=True,
)
else:
if os.path.exists('./other_thing/cropped_df.csv'):
df_ = pd.read_csv('./other_thing/cropped_df.csv')
df_ = df_.fillna('')
else:
            print('Preparing RLE masks')
df = pd.DataFrame()
df['ImageId'] = os.listdir('./other_thing/images')
df['Image'] = df['ImageId'].apply(lambda x: x.split('.')[0][:-2])
predictions = []
for imgid in os.listdir('./other_thing/images'):
mask = cv2.imread('./other_thing/masks/'+imgid)
rles = mask2enc(mask)
predictions.append(rles)
img_neg = pd.read_csv('./input/pred.csv')
img_neg = img_neg['fname'].unique()[:15000]
df2 = pd.DataFrame()
df2['ImageId'] = img_neg
df2['Image'] = df2['ImageId'].apply(lambda x: x.split('.')[0][:-2])
predictions2 = [['', '', '', '']]*15000
df_ = pd.DataFrame(predictions2+predictions, columns=[1, 2, 3, 4])
df_['ImageId'] = pd.concat([df2, df], axis=0)['ImageId'].values
df_['Image'] = | pd.concat([df2, df], axis=0) | pandas.concat |
import pandas as pd
from collections import defaultdict
import datetime
from xlrd import xldate_as_datetime
import os
import sys
import json
from openpyxl import load_workbook
from .os_functions import last_day_of_month,enter_exit, generate_md5
from .regex_functions import replace_re_special, strQ2B, strB2Q,symbol_to_spaces, normalize_punctuations
from .nlp_functions import get_keyword_dict, get_word_freq_dict, convert_key2list, process_text_eng
from .excel_functions import write_format_columns
from .func_classes import DfDict
import gc
import re
import warnings
import traceback
import logging
from pandas.errors import OutOfBoundsDatetime
import swifter
from flashtext import KeywordProcessor
warnings.filterwarnings('ignore')
def read_config_table(file_path, dtype=str):
df = | pd.DataFrame([]) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Pixel Starships Market API
# ----- Packages ------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import datetime
import csv
import numpy as np
import os
import pandas as pd
import pss_core as core
import pss_prestige as p
import re
import urllib.request
import xml.etree.ElementTree
# Discord limits messages to 2000 characters
MESSAGE_CHARACTER_LIMIT = 2000
HOME = os.getenv('HOME')
base_url = 'http://{}/'.format(core.get_production_server())
# ----- Utilities -----------------------------------------------------
def save_raw_text(raw_text, filename):
with open(filename, 'w') as f:
f.write(raw_text)
def get_base_url(api_version=1, https=False):
if https is True:
prefix = 'https://'
else:
prefix = 'http://'
if api_version==2:
return prefix + 'api2.pixelstarships.com/'
else:
return prefix + 'api.pixelstarships.com/'
# ----- Get Latest Version --------------------------------------------
def get_latest_version():
    url = base_url + 'SettingService/GetLatestVersion?languageKey=en'
data = urllib.request.urlopen(url).read()
return data.decode()
# ----- Item Designs --------------------------------------------------
def get_item_designs():
url = base_url + 'ItemService/ListItemDesigns2?languageKey=en'
data = urllib.request.urlopen(url).read()
return data.decode()
def save_item_design_raw(raw_text):
now = datetime.datetime.now()
filename = 'data/items-{}.txt'.format(now.strftime('%Y%m%d'))
save_raw_text(raw_text, filename)
def load_item_design_raw(refresh=False):
now = datetime.datetime.now()
    filename = 'data/items-{}.txt'.format(now.strftime('%Y%m%d'))  # must match save_item_design_raw
if os.path.isfile(filename) and refresh is False:
with open(filename, 'r') as f:
raw_text = f.read()
else:
raw_text = get_item_designs()
save_item_design_raw(raw_text)
return raw_text
def parse_item_designs(raw_text):
d = {}
# r_lookup = {}
root = xml.etree.ElementTree.fromstring(raw_text)
for c in root:
# print(c.tag) # ListItemDesigns
for cc in c:
# print(cc.tag) # ItemDesigns
for ccc in cc:
# print(ccc.tag) # ItemDesign
if ccc.tag != 'ItemDesign':
continue
item_name = ccc.attrib['ItemDesignName']
d[item_name] = ccc.attrib
# r_lookup[int(ccc.attrib['ItemDesignId'])] = item_name
return d
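# Hedged usage sketch (added for illustration; the item name is an assumption).
# Chains the loader and the parser to get a name-keyed lookup of item attributes.
def _demo_item_lookup():
    raw_text = load_item_design_raw()
    rtbl = parse_item_designs(raw_text)
    # each value is the attribute dict of one <ItemDesign> element
    return rtbl.get('Anonymous Mask')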
def xmltext_to_df(raw_text):
df = pd.DataFrame()
root = xml.etree.ElementTree.fromstring(raw_text)
for c in root:
for cc in c:
for i, ccc in enumerate(cc):
df = df.append(pd.DataFrame(ccc.attrib, index=[i]))
return df
# ----- Lists ---------------------------------------------------------
def get_lists(df_items):
item_rarities = list(df_items.Rarity.unique())
item_enhancements = list(df_items.EnhancementType.unique())
item_types = list(df_items.ItemType.unique())
item_subtypes = list(df_items.ItemSubType.unique())
return item_rarities, item_enhancements, item_types, item_subtypes
# ----- Parsing -------------------------------------------------------
def fix_item(item):
# Convert to lower case & non alpha-numeric
item = re.sub('[^a-z0-9]', '', item.lower())
item = re.sub('anonmask', 'anonymousmask', item)
item = re.sub('armour', 'armor', item)
item = re.sub('bunny', 'rabbit', item)
item = re.sub("(darkmatterrifle|dmr)(mark|mk)?(ii|2)", "dmrmarkii", item)
item = re.sub('golden', 'gold', item)
return item
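# Worked examples (added for illustration) of the normalization above:
#   fix_item('Anon Mask')    -> 'anonymousmask'  (lower-cased, non-alphanumerics dropped, alias expanded)
#   fix_item('DMR Mk II')    -> 'dmrmarkii'      (dark matter rifle aliases collapsed)
#   fix_item('Golden Bunny') -> 'goldrabbit'     ('bunny'->'rabbit', 'golden'->'gold')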
def filter_item_designs(search_str, rtbl, filter):
item_original = list(rtbl.keys())
item_lookup = [ fix_item(s) for s in item_original ]
item_fixed = fix_item(search_str)
txt = ''
for i, item_name in enumerate(item_lookup):
m = re.search(item_fixed, item_name)
if m is not None:
item_name = item_original[i]
d = rtbl[item_name]
# Filter out items
if (item_name == 'Gas' or
item_name == 'Mineral' or
d['MissileDesignId'] != '0' or
d['CraftDesignId'] != '0' or
d['CharacterDesignId'] != '0'):
continue
# Process
# item_price = d['FairPrice']
item_price = d['MarketPrice']
item_slot = re.sub('Equipment', '', d['ItemSubType'])
item_stat = d['EnhancementType']
item_stat_value = d['EnhancementValue']
if filter == 'price':
if item_price == '0':
item_price = 'NA'
txt += '{}: {}\n'.format(item_name, item_price)
elif filter == 'stats':
if item_stat == 'None':
continue
txt += '{}: {} +{} ({})\n'.format(item_name,
item_stat, item_stat_value, item_slot)
else:
print('Invalid filter')
quit()
if len(txt) == 0:
return None
else:
return txt.strip('\n')
def get_real_name(search_str, rtbl):
item_original = list(rtbl.keys())
item_lookup = [ fix_item(s) for s in item_original ]
item_fixed = fix_item(search_str)
try:
# Attempt to find an exact match
idx = item_lookup.index(item_fixed)
return item_original[idx]
except:
# Perform search if the exact match failed
m = [ re.search(item_fixed, n) is not None for n in item_lookup ]
item = pd.Series(item_original)[m]
if len(item) > 0:
return item.iloc[0]
else:
return None
# ----- Item Stats ----------------------------------------------------
def get_item_stats(item_name):
raw_text = load_item_design_raw()
item_lookup = parse_item_designs(raw_text)
market_txt = filter_item_designs(item_name, item_lookup, filter='stats')
if market_txt is not None:
market_txt = '**Item Stats**\n' + market_txt
return market_txt
# ----- Best Items ----------------------------------------------------
def rtbl2items(rtbl):
df_rtbl = | pd.DataFrame(rtbl) | pandas.DataFrame |
import time
from matplotlib import pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pynanoflann
from contexttimer import Timer
from sklearn import neighbors
n_index_points = 200000
n_query_points = 1000
n_repetitions = 5
data_dim = 3
n_neighbors = 100
index_type = np.float32
data = np.random.uniform(0, 100, size=(n_index_points, data_dim)).astype(index_type)
queries = np.random.uniform(0, 100, size=(n_query_points, data_dim)).astype(index_type)
algs = {
'sklearn_brute': neighbors.NearestNeighbors(n_neighbors=n_neighbors, algorithm='brute'),
'sklearn_ball_tree': neighbors.NearestNeighbors(n_neighbors=n_neighbors, algorithm='ball_tree'),
'sklearn_kd_tree': neighbors.NearestNeighbors(n_neighbors=n_neighbors, algorithm='kd_tree'),
'pynanoflann': pynanoflann.KDTree(n_neighbors=n_neighbors),
}
results = []
for rep in range(n_repetitions):
for alg_name, nn in algs.items():
with Timer() as index_build_time:
nn.fit(data)
with Timer() as query_time:
dist, idx = nn.kneighbors(queries)
results.append((alg_name, index_build_time.elapsed, query_time.elapsed))
df = | pd.DataFrame(results, columns=['Algorithm', 'Index build time, second', 'Query time, second']) | pandas.DataFrame |
import pandas as pd
import seaborn as sn
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
##read in the file
file_name="train.csv"
titanic=pd.read_csv(file_name,header=0,sep=",")
###split data
X=titanic.drop("Survived",axis=1)
y=titanic["Survived"]
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2, random_state=42) #80% of training, 20% test
###inspect data
X_train.head()
titanic_edl=pd.concat([X_train, y_train], axis=1)
titanic_edl.head()
### inspect survival
titanic_edl[["Survived","PassengerId"]].groupby("Survived").nunique()
#### create 3 columns: one for total family members, one for titles (embedded in names),
### and one for cabin levels
titanic_edl['Title'] = titanic_edl['Name'].map(lambda x: x.split(', ')[1].split('.')[0])
titanic_edl['Title'].unique()
titanic_edl["Family_size"]=titanic_edl["SibSp"] + titanic_edl["Parch"]
titanic_edl["Level_cabin"]=titanic_edl["Cabin"].str[0]
titanic_edl["Level_cabin"][titanic_edl.Level_cabin.isna()]="No Cabin"
###percentage of survived customers from 1st class
first_class=titanic_edl["Pclass"]==1
survived_1=titanic_edl[["Pclass","Survived","PassengerId"]].loc[first_class].groupby("Survived").nunique()["PassengerId"]
survived_1_total=titanic_edl[["Pclass","Survived","PassengerId"]].loc[first_class].nunique()["PassengerId"]
prop_survived_1=survived_1/survived_1_total*100
print(prop_survived_1)
#####bar plot showing proportion of males to females over all classes
titanic_edl[["Pclass","Sex","PassengerId"]].groupby(["Pclass","Sex"]).count().plot.bar()
male_female=titanic_edl[["Pclass","Sex","PassengerId"]].groupby(["Pclass","Sex"]).count()
male_female.reset_index(inplace=True)
###plot with seaborn males/females
plt.figure(figsize=(5,5))
sn.barplot(x="Pclass",y="PassengerId",hue="Sex",data=male_female)
plt.xlabel("Class")
plt.ylabel("Number of passengers")
plt.title("Gender distribution")
plt.show()
###females outnumber males in all classes, except the 3rd class, where males are more
#than double the number of females
####survival of females and males in all classes
grouped_=titanic_edl[["Pclass","Sex","PassengerId","Survived"]].groupby(["Pclass","Sex","Survived"]).count()
grouped_.reset_index(inplace=True)
###find distribution of total survived / total non-survived over class and sex
grouped_["SV_NSV"]=grouped_[["Survived","PassengerId"]].groupby("Survived").transform("sum")
grouped_["prop_surv"]=grouped_["PassengerId"]/grouped_["SV_NSV"]*100
grouped_.head()
#### plot survival based on sex over all classes, sum of probability of survival is 1
plt.figure(figsize=(5,5))
sn.barplot(x="Pclass",y="prop_surv",hue="Sex",data=grouped_[grouped_["Survived"]==1])
plt.xlabel("Class")
plt.ylabel("Survival rate")
plt.title("Survival rate based on classe for all genders)")
plt.show()
###when it comes to survival rates: females again have a higher survival rate than men,
# even in the 3rd class (by 5%), where men outnumber women
#women's survival rate is actually more than double that of men
#### calculate how many % of women actually survived vs men
grouped_["Total_gender"]=grouped_[["Sex","PassengerId"]].groupby("Sex").transform("sum")
grouped_["Surv_sex"]=grouped_["PassengerId"]/grouped_["Total_gender"]*100
###plot survival rates based on gender
plt.figure(figsize=(5,5))
sn.boxplot(x="Sex",y="Surv_sex",hue="Survived", data=grouped_)
plt.xlabel("Sex")
plt.ylabel("Survival rate")
plt.title("Survival distribution based on gender (over all classes)")
plt.show()
#### age and fare
titanic_edl[["Survived","Age"]].groupby(["Survived"]).mean()
### on average the lower the age the higher the survival chances were (28.4)
titanic_edl[["Survived","Age","Sex"]].groupby(["Survived","Sex"]).mean().unstack().plot.bar()
plt.title("Age distribution per gender and survival")
plt.show()
#### the mean age of surviving female passengers was higher than that of surviving male passengers,
# but also higher than that of non-surviving females (which is strange).
# basically, age for women is directly proportional to survival rate; for men the distribution is
# as expected --> namely older men died whilst younger men survived
titanic_edl[["Survived","Age","Pclass"]].groupby(["Survived","Pclass"]).mean().unstack().plot.bar()
plt.ylabel("Age")
plt.xlabel("Survived, Class")
plt.title("Survived per age and class")
plt.show()
### this looks a bit more "normal": the survival age increases by class and is usually lower than the age on non survival
####let"s look at the age distribution for each passenger class
#We can set dodge as True in the swarmplot to split the distributions
fig, ax = plt.subplots(figsize=(12,6), nrows=1, ncols=3)
plt.suptitle('Age Distribution of Survivor state by Gender in Each Class', fontsize=16)
for i in range(3):
ax[i].set_title('Class {}'.format(i+1))
ax[i].set_ylim(-5,85)
sn.boxplot(data=titanic_edl[titanic_edl['Pclass']==i+1],
x='Survived',
y='Age',
hue='Sex',
hue_order=['male','female'],
dodge=True,
ax=ax[i])
ax[1].set_ylabel(None)
ax[2].set_ylabel(None)
ax[0].legend_.remove()
ax[1].legend_.remove()
##lets look at prices
titanic_edl[["Survived","Fare"]].groupby(["Survived"]).mean()
titanic_edl[["Survived","Fare","Sex"]].groupby(["Survived","Sex"]).mean().unstack().plot.bar()
plt.ylabel("Fare Prices")
plt.title("Average Fare price per gender and survival")
plt.show()
### fare prices for females who survived were higher than those of men who survived
### fare price seems to be correlated with more than just class,
# since females are fewer in absolute numbers but pay higher fares
# (men in first class are more than double the number of women in first class)
titanic_edl[["Survived","Fare","Sex","Pclass"]].groupby(["Survived","Sex","Pclass"]).mean().unstack().plot.bar(legend=False)
plt.ylabel("Fare")
plt.title("Fare Price distributed across survival state, per gender and class")
## among men, the ones that survive consistently outpay the ones that don't
##Let's look at the distribution of Fare prices across classes and genders
fig, ax = plt.subplots(figsize=(12,6), nrows=1, ncols=3)
plt.suptitle('Fare Price Distribution of Survivor state by Gender in Each Class', fontsize=16)
for i in range(3):
ax[i].set_title('Class {}'.format(i+1))
ax[i].set_ylim(-5,260)
sn.boxplot(data=titanic_edl[titanic_edl['Pclass']==i+1],
x='Survived',
y='Fare',
hue='Sex',
hue_order=['male','female'],
dodge=True,
ax=ax[i])
ax[1].set_ylabel(None)
ax[2].set_ylabel(None)
ax[0].legend_.remove()
ax[1].legend_.remove()
##lets look at prices
titanic_edl[["Survived","Fare"]].groupby(["Survived"]).mean()
titanic_edl[["Survived","Fare","Sex"]].groupby(["Survived","Sex"]).mean().unstack().plot.bar()
plt.ylabel("Fare Prices")
plt.title("Average Fare price per gender and survival")
plt.show()
### let"s see the connection amongst sex and sib/spouses and fare and sib/spuses
titanic_edl[["Survived","SibSp","PassengerId"]].groupby(["Survived","SibSp"]).count().unstack().plot.bar()
###survival with up to 4 siblings/spouses (small families)
#most of those who survived were travelling alone
###survival with up to 4 siblings/spouses (small families)
titanic_edl[["Survived","SibSp","Sex"]].groupby(["Survived","Sex"]).count()
titanic_edl[["Survived","SibSp","Sex","Fare"]].groupby(["Survived","SibSp","Sex"]).mean()
### the ones that survived have up to 4 siblings, and with 3 siblings you actually spend the highest amount of money
##only women with 3 siblings survived
titanic_edl[["Survived","SibSp","Pclass","Fare"]].groupby(["Survived","SibSp","Pclass"]).mean()
###fare price for 3 siblings is the same for survived and non-survived --> it only matters whether you are female in order to survive
dist_sib_fare=titanic_edl[["Survived","SibSp","Pclass","Fare"]].groupby(["Survived","SibSp","Pclass"]).mean()
dist_sib_fare.reset_index(inplace=True)
plt.figure(figsize=(5,5))
sn.boxplot(x="SibSp",y="Fare",hue="Survived",data=dist_sib_fare)
plt.xlabel("Siblings")
plt.ylabel("Fare price")
plt.title("Fare prices based on #siblings")
plt.show()
###fare price and gender distribution
sex_sib=titanic_edl[["Survived","SibSp","Sex","Fare"]].groupby(["Survived","SibSp","Sex"]).mean()
sex_sib.reset_index(inplace=True)
plt.figure(figsize=(5,5))
sn.boxplot(x="Sex",y="Fare",hue="Survived",data=sex_sib)
plt.xlabel("Siblings")
plt.ylabel("Fare price")
plt.title("Fare prices based on #siblings")
plt.show()
#### let's look at the significance of name titles to survived class
titanic_edl.groupby("Title")["Survived"].aggregate(["mean","count"])
total_pop=titanic_edl["Survived"].count()
def weighted_survival(df):
weight=df["Survived"].count()/total_pop
surv=df["Survived"] * weight*100
return np.sum(surv)
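# Worked example (added for illustration) of the weighting above: for a title
# held by 10 of 500 passengers with 6 survivors, weight = 10/500 = 0.02, each
# survivor contributes 0.02 * 100 = 2, so the group's weighted value is 6 * 2 = 12.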
titanic_edl.groupby("Title").apply(weighted_survival).plot.bar()
plt.title("Avg. weighted Suvival rate by title (adj. by population size")
plt.ylabel("Survival rate in %")
titanic_edl.groupby(["Title","Pclass"])["Survived"].mean().unstack().plot.bar()
plt.title("Avg. weighted Suvival rate by title and class(adj. by population size")
plt.ylabel("Survival rate in %")
###let's investigate family size alone
titanic_edl.groupby(["Family_size"])["Survived"].mean().plot.bar()
plt.title("Survival by family size ")
plt.ylabel("Survival rate in %")
###let's investigate family size together with other factors: gender, class
titanic_edl.groupby(["Family_size","Pclass"])["Survived"].mean().unstack().plot.bar()
plt.title("Survival by family size and class")
plt.ylabel("Survival rate in %")
####is survival rate dependent on family size and sex?
titanic_edl.groupby(["Family_size","Sex"])["Survived"].mean().unstack().plot.bar()
plt.title("Survival by family size and class")
plt.ylabel("Survival rate in %")
### what's the underlying distribution of males/females by family size?
titanic_edl.groupby(["Family_size","Sex"])["PassengerId"].count().unstack().plot.bar()
plt.title("Survival by family size and class")
plt.ylabel("Number of passengers")
plt.show()
###let"s look at parent alone
titanic_edl.groupby(["Parch"])["Survived"].mean().plot.bar(legend=False)
plt.title("Survival by direct dependecy: parents")
plt.ylabel("Survval rate")
plt.show()
###above a dependency of 3 there are no survivors
####Parch doesn't seem to add any other value
###parents by direct dependency
titanic_edl.groupby(["Parch","Sex"])["Survived"].mean().unstack().plot.bar()
plt.title("Survival by direct dependecy: parents")
plt.ylabel("Survval rate")
plt.show()
###make a dummy variable that encodes having more than 4 siblings/spouses
# (the more dependents you have, the less likely it is that you survive)
titanic_edl.groupby(["SibSp"])["Survived"].mean().plot.bar()
plt.title("Survival rate by direct dependency: child or spouse")
plt.ylabel("Survval rate")
plt.show()
### children dependent on gender
titanic_edl.groupby(["SibSp","Sex"])["Survived"].mean().unstack().plot.bar()
plt.title("Survival rate by gender and direct dependency: child or spouse")
plt.ylabel("Survval rate")
plt.show()
#####Let"s investigate if cabin is relevant for survival rate
titanic_edl["Level_cabin"].unique()
titanic_edl.groupby("Level_cabin")["Survived"].mean().plot.bar()
plt.title("Survival rates by cabin levels")
plt.ylabel("Survival rate")
###inspect cabin levels by class
titanic_edl.groupby(["Level_cabin","Pclass"])["Survived"].mean().unstack().plot.bar()
plt.title("Survival rates by cabin levels and class")
plt.ylabel("Survival rate")
####most of the upper-level cabins belong to the 1st class --> there is a correlation between classes
# and cabins
titanic_edl.groupby(["Level_cabin","Pclass"])["Survived"].count().unstack().plot.bar()
plt.title("Survival rates by cabin levels and class")
plt.ylabel("#Passengers")
###bin the data into categories
titanic_edl["fam_size"]=pd.cut(titanic_edl["Family_size"], bins=3, labels=["small_fam","medium_fam","large_fam"])
titanic_edl["Sib_Sp_num"]=pd.cut(titanic_edl["SibSp"], bins=2, labels=["less_4","over_4"])
###heatmap with initial variables
one_hot_family=pd.get_dummies(titanic_edl["fam_size"])
one_hot_sibling= | pd.get_dummies(titanic_edl["Sib_Sp_num"]) | pandas.get_dummies |
from contextlib import contextmanager
import pandas as pd
import pytest
from pandas._testing import assert_frame_equal
from biopsykit.metadata import bmi, whr
from biopsykit.utils.exceptions import ValueRangeError
@contextmanager
def does_not_raise():
yield
def data_complete():
return pd.DataFrame(
{
"weight": [68, 68, 64],
"height": [165, 178, 190],
"waist": [76, 92, 0.71],
"hip": [97, 112, 0.89],
}
)
def bmi_correct():
return pd.DataFrame(
{
"weight": [68, 68, 64],
"height": [165, 178, 190],
}
)
def bmi_wrong_order():
return pd.DataFrame(
{
"height": [165, 178],
"weight": [68, 68],
}
)
def whr_correct():
return pd.DataFrame(
{
"waist": [76, 92, 0.71],
"hip": [97, 112, 0.89],
}
)
def whr_wrong_values():
return pd.DataFrame(
{
"hip": [50, 4, 0.5],
"waist": [100, 20, 0.1],
}
)
def bmi_correct_solution():
return pd.DataFrame({"BMI": [24.98, 21.46, 17.73]})
def whr_correct_solution():
return pd.DataFrame({"WHR": [0.784, 0.821, 0.798]})
class TestMetadata:
@pytest.mark.parametrize(
"input_data, expected",
[(bmi_correct(), does_not_raise()), (bmi_wrong_order(), pytest.raises(ValueRangeError))],
)
def test_bmi_raises(self, input_data, expected):
with expected:
bmi(input_data)
@pytest.mark.parametrize(
"input_data, columns, expected",
[(bmi_correct(), None, bmi_correct_solution()), (bmi_correct(), ["weight", "height"], bmi_correct_solution())],
)
def test_bmi(self, input_data, columns, expected):
data_out = bmi(input_data, columns)
| assert_frame_equal(data_out, expected) | pandas._testing.assert_frame_equal |
from warnings import simplefilter
import ntpath
import os
import pandas as pd
import pickle
import run
from colorama import Fore
from pandas import DataFrame
# ignore all future warnings
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=UserWarning)
dir_path = os.path.dirname(os.path.realpath(__file__))
def input_path():
"""
Check path of the mounted system image and return nothing.
"""
print(
Fore.GREEN + 'Provide full path of mounted system image (.vhdx) ' + Fore.YELLOW +
'e.g. F:\C\Windows or F:\C ')
print(Fore.GREEN)
path = str(input('Path:')).strip()
mount = path[0:2]
# print (mount)
if ntpath.ismount(mount):
# print (mount + ' is mounted')
if path == mount + '\C':
sig_scan(path)
else:
sig_scan(path)
else:
        print(Fore.YELLOW + '\nError - provide the correct path to a mounted system image and try again\n')
input_path()
return 0
def sig_scan(path):
"""
    Receives the location of the mounted files, runs sigcheck.exe, and saves the output for later analysis.
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
sigcheck = dir_path + r'\bin\sigcheck.exe'
options = '-s -c -e -h -v -vt -w -nobanner'
save = dir_path + r'\csvFiles\sigcheckToOrganise.csv'
sig_cmd = sigcheck + ' ' + options + ' ' + save + ' ' + path
print(Fore.YELLOW + '\nThis execution might take some time....')
os.system(sig_cmd)
if os.stat(dir_path + r'\csvFiles\sigcheckToOrganise.csv').st_size <= 317:
print('Try again\n')
input_path()
else:
analysis()
return 0
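# Hedged illustration (comment only; the mount letter is an assumption): with
# path 'F:\C' the assembled command looks roughly like
#   <tool_dir>\bin\sigcheck.exe -s -c -e -h -v -vt -w -nobanner <tool_dir>\csvFiles\sigcheckToOrganise.csv F:\C
# i.e. recurse subdirectories and emit CSV output with hashes and VirusTotal
# results, writing the report to the save file before scanning the mounted image.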
def analysis():
"""
Analyse the output generated by sigcheck.exe using Machine Learning
"""
save = dir_path + r'\csvFiles\sigcheckToOrganise.csv'
sigs = pd.read_csv(save, encoding='utf-16', delimiter=',')
bigdata = sigs[['Path', 'Verified', 'Machine Type', 'Publisher', 'Description', 'MD5', 'VT detection']]
organised = DataFrame(bigdata)
path_organised = organised['Path']
df1 = organised.loc[organised['Verified'] == 'Unsigned']
df1 = DataFrame(df1)
# ML part #
filename = dir_path + r'\ML\cmdModel.sav'
vectfile = dir_path + r'\ML\vecFile.sav'
se_model = pickle.load(open(filename, 'rb'))
load_vect = pickle.load(open(vectfile, 'rb'))
text = load_vect.transform(path_organised)
print_this = se_model.predict(text)
print_prob = se_model.predict_proba(text) * 100
listdf = pd.DataFrame(print_this)
line_pr = pd.DataFrame(data=print_prob)
linesdf = pd.DataFrame(path_organised)
listdf = listdf.rename(columns={0: 'ML-Output'})
linesdf = linesdf.rename(columns={0: 'path'})
result = pd.concat([linesdf, listdf, line_pr], axis=1, sort=False)
re = result.sort_values(by='ML-Output', ascending=False)
re = pd.DataFrame(re)
dff2 = re.loc[re['ML-Output'] == 1]
pd.DataFrame(dff2).to_excel(dir_path + r'\ML\Step-3-results' + r'\suspicious_paths.xlsx', index=False)
pd.DataFrame(re).to_excel(dir_path + r'\ML\Step-3-results' + r'\all_paths.xlsx', index=False)
if df1.empty:
print(Fore.YELLOW + 'Nothing verified to be suspicious')
if | pd.DataFrame(dff2) | pandas.DataFrame |
"""
====== BLOCK OF FUNCTIONS ======
"""
import xosrm
import os
from IPython.display import display
import pandas as pd
def df_first(data):
df_first = pd.DataFrame(data['первая половина месяца']).T
df_first.fillna(0, inplace=True)
return df_first
def df_second(data):
df_second = pd.DataFrame(data['вторая половина месяца']).T
df_second.fillna(0, inplace=True)
return df_second
def open_excel_file(file_path):
'''Open Excel file and create DataFrame
input: filepath
output: DataFrame'''
data = pd.read_excel(file_path)
return data
def schedule(df):
    '''
    Create a df for each day of the even and odd weeks.
    even = 'четная нед.' (even week); odd = 'не четная нед.' (odd week)
    input: DataFrame (schedule)
    output: dict with coords
    '''
# Create nested dicts
monthly_coords = {}
monthly_coords['первая половина месяца'] = {}
monthly_coords['вторая половина месяца'] = {}
monthly_coords['первая половина месяца']['четная нед.'] = {}
monthly_coords['первая половина месяца']['не четная нед.'] = {}
monthly_coords['вторая половина месяца']['четная нед.'] = {}
monthly_coords['вторая половина месяца']['не четная нед.'] = {}
'''First part of the month'''
    # Create df for each day of the even week and add to dict
first_part = df[df['№п/п четная нед.'].notnull() &
(df['Интервал повторений'].isin([1,2,4]))]
even_mon = first_part[first_part['Дни недели']==1].sort_values(by=['№п/п четная нед.'])
monthly_coords['первая половина месяца']['четная нед.']['1-ПН'] = even_mon.to_dict('records')
even_tue = first_part[first_part['Дни недели']==2].sort_values(by=['№п/п четная нед.'])
monthly_coords['первая половина месяца']['четная нед.']['2-ВТ'] = even_tue.to_dict('records')
even_wed = first_part[first_part['Дни недели']==3].sort_values(by=['№п/п четная нед.'])
monthly_coords['первая половина месяца']['четная нед.']['3-СР'] = even_wed.to_dict('records')
even_thu = first_part[first_part['Дни недели']==4].sort_values(by=['№п/п четная нед.'])
monthly_coords['первая половина месяца']['четная нед.']['4-ЧТ'] = even_thu.to_dict('records')
even_fri = first_part[first_part['Дни недели']==5].sort_values(by=['№п/п четная нед.'])
monthly_coords['первая половина месяца']['четная нед.']['5-ПТ'] = even_fri.to_dict('records')
    # Create df for each day of the odd week and add to dict
first_part = df[df['№п/п не четная нед.'].notnull() &
(df['Интервал повторений'].isin([1,2,4]))]
odd_mon = first_part[first_part['Дни недели']==1].sort_values(by=['№п/п не четная нед.'])
monthly_coords['первая половина месяца']['не четная нед.']['1-ПН'] = odd_mon.to_dict('records')
odd_tue = first_part[first_part['Дни недели']==2].sort_values(by=['№п/п не четная нед.'])
monthly_coords['первая половина месяца']['не четная нед.']['2-ВТ'] = odd_tue.to_dict('records')
odd_wed = first_part[first_part['Дни недели']==3].sort_values(by=['№п/п не четная нед.'])
monthly_coords['первая половина месяца']['не четная нед.']['3-СР'] = odd_wed.to_dict('records')
odd_thu = first_part[first_part['Дни недели']==4].sort_values(by=['№п/п не четная нед.'])
monthly_coords['первая половина месяца']['не четная нед.']['4-ЧТ'] = odd_thu.to_dict('records')
odd_fri = first_part[first_part['Дни недели']==5].sort_values(by=['№п/п не четная нед.'])
monthly_coords['первая половина месяца']['не четная нед.']['5-ПТ'] = odd_fri.to_dict('records')
'''Second part of the month'''
    # Create df for each day of the even week and add to dict
first_part = df[df['№п/п четная нед.'].notnull() &
(df['Интервал повторений'].isin([1,2,8]))]
even_mon = first_part[first_part['Дни недели']==1].sort_values(by=['№п/п четная нед.'])
monthly_coords['вторая половина месяца']['четная нед.']['1-ПН'] = even_mon.to_dict('records')
even_tue = first_part[first_part['Дни недели']==2].sort_values(by=['№п/п четная нед.'])
monthly_coords['вторая половина месяца']['четная нед.']['2-ВТ'] = even_tue.to_dict('records')
even_wed = first_part[first_part['Дни недели']==3].sort_values(by=['№п/п четная нед.'])
monthly_coords['вторая половина месяца']['четная нед.']['3-СР'] = even_wed.to_dict('records')
even_thu = first_part[first_part['Дни недели']==4].sort_values(by=['№п/п четная нед.'])
monthly_coords['вторая половина месяца']['четная нед.']['4-ЧТ'] = even_thu.to_dict('records')
even_fri = first_part[first_part['Дни недели']==5].sort_values(by=['№п/п четная нед.'])
monthly_coords['вторая половина месяца']['четная нед.']['5-ПТ'] = even_fri.to_dict('records')
    # Create df for each day of the odd week and add to dict
first_part = df[df['№п/п не четная нед.'].notnull() &
(df['Интервал повторений'].isin([1,2,8]))]
odd_mon = first_part[first_part['Дни недели']==1].sort_values(by=['№п/п не четная нед.'])
monthly_coords['вторая половина месяца']['не четная нед.']['1-ПН'] = odd_mon.to_dict('records')
odd_tue = first_part[first_part['Дни недели']==2].sort_values(by=['№п/п не четная нед.'])
monthly_coords['вторая половина месяца']['не четная нед.']['2-ВТ'] = odd_tue.to_dict('records')
odd_wed = first_part[first_part['Дни недели']==3].sort_values(by=['№п/п не четная нед.'])
monthly_coords['вторая половина месяца']['не четная нед.']['3-СР'] = odd_wed.to_dict('records')
odd_thu = first_part[first_part['Дни недели']==4].sort_values(by=['№п/п не четная нед.'])
monthly_coords['вторая половина месяца']['не четная нед.']['4-ЧТ'] = odd_thu.to_dict('records')
odd_fri = first_part[first_part['Дни недели']==5].sort_values(by=['№п/п не четная нед.'])
monthly_coords['вторая половина месяца']['не четная нед.']['5-ПТ'] = odd_fri.to_dict('records')
return monthly_coords
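# Hedged usage sketch (illustrative only; the file name is an assumption).
# schedule() expects the columns referenced above ('№п/п четная нед.',
# '№п/п не четная нед.', 'Интервал повторений', 'Дни недели', 'Долгота',
# 'Широта', ...):
#   monthly = schedule(open_excel_file('schedule.xlsx'))
#   monthly['первая половина месяца']['четная нед.']['1-ПН']  # Monday point dicts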
def calc_routes(full_dict):
''' Calculate SIMPLE_ROUTE for coords in a given order
Calculate distances and durations for each day of the week
Output: dict of geometries for each day
'''
result_dict = full_dict.copy()
for key0, value0 in result_dict.items():
print('================== {} =================='.format(key0))
for key, value in value0.items():
print('================== {} =================='.format(key))
for key2, value2 in value.items():
print(key2)
if len(value2) > 1: # If list has more than 1 point
coords = []
for point in value2:
coords.append((point['Долгота'], point['Широта']))
source = coords[0]
dest = coords[0]
coords = coords[1:]
result = xosrm.simple_route(source, dest, coords,
output='full', overview="full", geometry="geojson")
print(result['routes'][0]['distance']/1000, 'km')
print(result['routes'][0]['duration']/60.026, 'min')
result_dict[key0][key][key2].append(result['routes'][0]['geometry'])
else:
pass
return result_dict
def calc_dist(full_dict):
''' Calculate distances for each day of the week
Output: dict of distances
'''
result_dict = full_dict.copy()
# Create dict of distances
dist_dict = {}
for key0, value0 in result_dict.items():
dist_dict[key0] = {}
for key, value in value0.items():
dist_dict[key0][key]= {}
for key2, value2 in value.items():
if len(value2) > 1: # If list has more than 1 point
coords = []
for point in value2:
coords.append([point['Долгота'], point['Широта']])
source = coords[0]
dest = coords[0]
coords = coords[1:]
result = xosrm.simple_route(source, dest, coords, continue_straight="false",
output='full', overview="full", geometry="geojson")
# Add distances to dict
distance = result['routes'][0]['distance']/1000
dist_dict[key0][key][key2] = distance
else:
dist_dict[key0][key][key2] = 0
return dist_dict
def calc_trips(full_dict):
''' Calculate TRIP_ROUTE
Calculate distances and durations for each day of the week
Output: dict of geometries for each day
'''
# Dict of geometries of route by day
result_dict = full_dict.copy()
# Create dict of distances
dist_dict = {}
for key0, value0 in result_dict.items():
dist_dict[key0] = {}
for key, value in value0.items():
dist_dict[key0][key]= {}
for key2, value2 in value.items():
if len(value2) > 1: # If list has more than 1 point
coords = []
for point in value2:
coords.append([point['Долгота'], point['Широта']])
result = xosrm.trip(coords, source='first', roundtrip=True,
output='full', overview="full", geometry="geojson")
distance = result['trips'][0]['distance']/1000
dist_dict[key0][key][key2] = distance
# Get waypoinnt order and replace origin order in the dict
for index, point in enumerate(value2):
if key == 'четная нед.':
point.update({'№п/п четная нед.': result['waypoints'][index]['waypoint_index'] + 1})
point.update({'№п/п не четная нед.': None})
if key == 'не четная нед.':
point.update({'№п/п не четная нед.': result['waypoints'][index]['waypoint_index'] + 1})
point.update({'№п/п четная нед.': None})
else:
dist_dict[key0][key][key2] = 0
return dist_dict, result_dict
def write_to_excel(data, short_file_name):
writer = pd.ExcelWriter('Расписания ТП расчетные.xlsx', engine = 'xlsxwriter')
short_file_name = short_file_name.split(" ")[0]
full_schedule = pd.DataFrame()
for key0, value0 in data[1].items():
for key, value in value0.items():
for key2, value2 in value.items():
df = pd.DataFrame.from_dict(value2, orient='columns')
full_schedule = pd.concat([full_schedule, df], ignore_index=True)
# Get uniq rows
even_part = full_schedule[['Внешний ID ТТ', '№п/п четная нед.']].drop_duplicates(subset='Внешний ID ТТ', keep='first', inplace=False)
# Get only not null rows for even week
even_part = even_part[even_part['№п/п четная нед.'].notnull()]
# Create table with uniq rows WITHOUT even week
odd_part = full_schedule[['Внешний ID ТТ','Клиент', 'Адрес', 'Долгота', 'Широта', 'Интервал повторений', 'Дни недели',
'№п/п не четная нед.']].drop_duplicates(subset=['Внешний ID ТТ', 'Дни недели'], keep='last', inplace=False)
# Merge two tables on ID TT
merge_table = | pd.merge(odd_part, even_part, on='Внешний ID ТТ', how='left') | pandas.merge |
"""
Utility functions used for AMPL dataset curation and creation.
"""
""" TOC:
aggregate_assay_data(assay_df, value_col='VALUE_NUM', output_value_col=None,
label_actives=True,
active_thresh=None,
id_col='CMPD_NUMBER', smiles_col='rdkit_smiles', relation_col='VALUE_FLAG', date_col=None)
replicate_rmsd(dset_df, smiles_col='base_rdkit_smiles', value_col='PIC50', relation_col='relation')
mle_censored_mean(cmpd_df, std_est, value_col='PIC50', relation_col='relation')
get_three_level_class(value, red_thresh, yellow_thresh)
get_binary_class(value, thresh=4.0)
set_group_permissions(path, system='AD', owner='GSK')
filter_in_by_column_values (column, values, data)
filter_out_by_column_values (column, values, data)
filter_out_comments (values, values_cs, data) ...delete rows that contain comments listed (can specify 'case sensitive' if needed)
get_rdkit_smiles_parent (data)...................creates a new column with the rdkit smiles parent (salts stripped off)
average_and_remove_duplicates (column, tolerance, list_bad_duplicates, data)
summarize_data(column, num_bins, title, units, filepath, data)..............prints min/max/avg/histogram
"""
import os
import pdb
import shutil  # needed by set_group_permissions (shutil.chown)
import pandas as pd
import numpy as np
from scipy.stats import norm
from scipy.optimize import minimize_scalar
from sklearn import metrics
import logging
import urllib3
from atomsci.ddm.utils.struct_utils import get_rdkit_smiles, base_smiles_from_smiles
feather_supported = True
try:
import feather
except (ImportError, AttributeError, ModuleNotFoundError):
feather_supported = False
from rdkit import Chem
from rdkit.Chem.Descriptors import MolWt
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
# ******************************************************************************************************************************************
def set_group_permissions(path, system='AD', owner='GSK'):
"""Set file group and permissions to standard values for a dataset containing proprietary
data owned by 'owner'. Later we may add a 'public' option, or groups for data from other pharma companies.
Args:
path (string): File path
system (string): Computing environment from which group ownerships will be derived; currently, either 'LC' for LC
filesystems or 'AD' for LLNL systems where owners and groups are managed by Active Directory.
owner (string): Who the data belongs to, either 'public' or the name of a company (e.g. 'GSK') associated with a
restricted access group.
Returns:
None
"""
# Currently, if we're not on an LC machine, we're on an AD-controlled system. This could change.
if system != 'LC':
system = 'AD'
owner_group_map = dict(GSK = {'LC' : 'gskcraa', 'AD' : 'gskusers-ad'},
public = {'LC' : 'atom', 'AD' : 'atom'} )
group = owner_group_map[owner][system]
shutil.chown(path, group=group)
os.chmod(path, 0o770)
# ******************************************************************************************************************************************
def replicate_rmsd(dset_df, smiles_col='base_rdkit_smiles', value_col='PIC50', relation_col='relation'):
"""
Compute RMS deviation of all replicate uncensored measurements in dset_df from their means. Measurements are treated
as replicates if they correspond to the same SMILES string, and are considered censored if the relation
column contains > or <. The resulting value is meant to be used as an estimate of measurement error for all compounds
in the dataset.
"""
dset_df = dset_df[~(dset_df[relation_col].isin(['<', '>']))]
uniq_smiles, uniq_counts = np.unique(dset_df[smiles_col].values, return_counts=True)
smiles_with_reps = uniq_smiles[uniq_counts > 1]
uniq_devs = []
for smiles in smiles_with_reps:
values = dset_df[dset_df[smiles_col] == smiles][value_col].values
uniq_devs.extend(values - values.mean())
uniq_devs = np.array(uniq_devs)
rmsd = np.sqrt(np.mean(uniq_devs ** 2))
return rmsd
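# Hedged usage sketch (added for illustration; the values below are made up).
# Two uncensored replicates of the same SMILES differing by 0.4 deviate by
# +/-0.2 from their mean, giving an RMSD of 0.2.
def _demo_replicate_rmsd():
    demo_df = pd.DataFrame({'base_rdkit_smiles': ['CCO', 'CCO', 'c1ccccc1'],
                            'PIC50': [6.0, 6.4, 5.1],
                            'relation': ['', '', '']})
    return replicate_rmsd(demo_df)  # -> 0.2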
# ******************************************************************************************************************************************
def mle_censored_mean(cmpd_df, std_est, value_col='PIC50', relation_col='relation'):
"""
Compute a maximum likelihood estimate of the true mean value underlying the distribution of replicate assay measurements for a
single compound. The data may be a mix of censored and uncensored measurements, as indicated by the 'relation' column in the input
data frame cmpd_df. std_est is an estimate for the standard deviation of the distribution, which is assumed to be Gaussian;
we typically compute a common estimate for the whole dataset using replicate_rmsd().
"""
left_censored = np.array(cmpd_df[relation_col].values == '<', dtype=bool)
right_censored = np.array(cmpd_df[relation_col].values == '>' , dtype=bool)
not_censored = ~(left_censored | right_censored)
n_left_cens = sum(left_censored)
n_right_cens = sum(right_censored)
nreps = cmpd_df.shape[0]
values = cmpd_df[value_col].values
nan = float('nan')
relation = ''
# If all the replicate values are left- or right-censored, return the smallest or largest reported (threshold) value accordingly.
if n_left_cens == nreps:
mle_value = min(values)
relation = '<'
elif n_right_cens == nreps:
mle_value = max(values)
relation = '>'
elif n_left_cens + n_right_cens == 0:
# If no values are censored, the MLE is the actual mean.
mle_value = np.mean(values)
else:
# Some, but not all observations are censored.
# First, define the negative log likelihood function
def loglik(mu):
ll = -sum(norm.logpdf(values[not_censored], loc=mu, scale=std_est))
if n_left_cens > 0:
ll -= sum(norm.logcdf(values[left_censored], loc=mu, scale=std_est))
if n_right_cens > 0:
ll -= sum(norm.logsf(values[right_censored], loc=mu, scale=std_est))
return ll
# Then minimize it
opt_res = minimize_scalar(loglik, method='brent')
if not opt_res.success:
print('Likelihood maximization failed, message is: "%s"' % opt_res.message)
mle_value = nan
else:
mle_value = opt_res.x
return mle_value, relation
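# Illustrative sketch (not part of the original module): how mle_censored_mean might be called for one
# compound's replicate measurements. The values and column names below are hypothetical examples that
# follow the function defaults ('PIC50' values, 'relation' flags).
#
#   cmpd_df = pd.DataFrame({'PIC50': [6.1, 6.3, 5.0], 'relation': ['', '', '<']})
#   std_est = 0.3  # e.g. taken from replicate_rmsd() on the full dataset
#   mean_est, rel = mle_censored_mean(cmpd_df, std_est)
#
# With a mix of censored ('<' or '>') and uncensored rows, the function maximizes the Gaussian
# likelihood (using norm.logcdf/logsf for the censored terms) rather than taking a plain mean.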
# ******************************************************************************************************************************************
def aggregate_assay_data(assay_df, value_col='VALUE_NUM', output_value_col=None,
label_actives=True,
active_thresh=None,
id_col='CMPD_NUMBER', smiles_col='rdkit_smiles', relation_col='VALUE_FLAG', date_col=None):
"""
Map RDKit SMILES strings in assay_df to base structures, then compute an MLE estimate of the mean value over replicate measurements
for the same SMILES strings, taking censoring into account. Generate an aggregated result table with one value for each unique base
SMILES string, to be used in an ML-ready dataset.
:param assay_df: The input data frame to be processed.
:param value_col: The column in the data frame containing assay values to be averaged.
:param output_value_col: Optional; the column name to use in the output data frame for the averaged data.
:param label_actives: If True, generate an additional column 'active' indicating whether the mean value is above a threshold specified by active_thresh.
:param active_thresh: The threshold to be used for labeling compounds as active or inactive.
If active_thresh is None (the default), the threshold used is the minimum reported value across all records
        with left-censored values (i.e., those with '<' in the relation column).
:param id_col: The input data frame column containing compound IDs.
:param smiles_col: The input data frame column containing SMILES strings.
:param relation_col: The input data frame column containing relational operators (<, >, etc.).
:param date_col: The input data frame column containing dates when the assay data was uploaded. If not None, the code will assign the earliest
date among replicates to the aggregate data record.
:return: A data frame containing averaged assay values, with one value per compound.
"""
assay_df = assay_df.fillna({relation_col: '', smiles_col: ''})
# Filter out rows where SMILES is missing
n_missing_smiles = np.array([len(smiles) == 0 for smiles in assay_df[smiles_col].values]).sum()
print("%d entries in input table are missing SMILES strings" % n_missing_smiles)
has_smiles = np.array([len(smiles) > 0 for smiles in assay_df[smiles_col].values])
assay_df = assay_df[has_smiles].copy()
# Estimate the measurement error across replicates for this assay
std_est = replicate_rmsd(assay_df, smiles_col=smiles_col, value_col=value_col, relation_col=relation_col)
# Map SMILES strings to base structure SMILES strings, then map these to indices into the list of
# unique base structures
orig_smiles_strs = assay_df[smiles_col].values
norig = len(set(orig_smiles_strs))
smiles_strs = [base_smiles_from_smiles(smiles, True) for smiles in orig_smiles_strs]
assay_df['base_rdkit_smiles'] = smiles_strs
uniq_smiles_strs = list(set(smiles_strs))
nuniq = len(uniq_smiles_strs)
print("%d unique SMILES strings are reduced to %d unique base SMILES strings" % (norig, nuniq))
smiles_map = dict([(smiles,i) for i, smiles in enumerate(uniq_smiles_strs)])
smiles_indices = np.array([smiles_map.get(smiles, nuniq) for smiles in smiles_strs])
assay_vals = assay_df[value_col].values
value_flags = assay_df[relation_col].values
# Compute a maximum likelihood estimate of the mean assay value for each compound, averaging over replicates
# and factoring in censoring. Report the censoring/relation/value_flag only if the flags are consistent across
    # all replicates. Exclude compounds that couldn't be mapped to SMILES strings.
cmpd_ids = assay_df[id_col].values
reported_cmpd_ids = ['']*nuniq
reported_value_flags = ['']*nuniq
if date_col is not None:
reported_dates = ['']*nuniq
reported_assay_val = np.zeros(nuniq, dtype=float)
for i in range(nuniq):
cmpd_ind = np.where(smiles_indices == i)[0]
cmpd_df = assay_df.iloc[cmpd_ind]
reported_assay_val[i], reported_value_flags[i] = mle_censored_mean(cmpd_df, std_est, value_col=value_col,
relation_col=relation_col)
# When multiple compound IDs map to the same base SMILES string, use the lexicographically smallest one.
reported_cmpd_ids[i] = sorted(set(cmpd_ids[cmpd_ind]))[0]
# If a date column is specified, use the earliest one among replicates
if date_col is not None:
# np.datetime64 doesn't seem to understand the date format in GSK's crit res tables
#earliest_date = sorted([np.datetime64(d) for d in cmpd_df[date_col].values])[0]
earliest_date = sorted(pd.to_datetime(cmpd_df[date_col], infer_datetime_format=True).values)[0]
reported_dates[i] = np.datetime_as_string(earliest_date)
if output_value_col is None:
output_value_col = value_col
agg_df = pd.DataFrame({
'compound_id' : reported_cmpd_ids,
'base_rdkit_smiles' : uniq_smiles_strs,
'relation' : reported_value_flags,
output_value_col : reported_assay_val})
if date_col is not None:
agg_df[date_col] = reported_dates
# Label each compound as active or not, based on the reported relation and values relative to a common threshold
if label_actives:
inactive_df = agg_df[agg_df.relation == '<']
if inactive_df.shape[0] > 0 and active_thresh is None:
active_thresh = np.min(inactive_df[output_value_col].values)
if active_thresh is not None:
is_active = ((agg_df.relation != '<') & (agg_df[output_value_col].values > active_thresh))
agg_df['active'] = [int(a) for a in is_active]
else:
agg_df['active'] = 1
return agg_df
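# Illustrative sketch (not part of the original module): typical use of aggregate_assay_data on a raw
# assay table. The input file name is hypothetical; column names follow the keyword arguments shown.
#
#   raw_df = pd.read_csv('raw_assay_data.csv')
#   agg_df = aggregate_assay_data(raw_df, value_col='PIC50', id_col='compound_id',
#                                 smiles_col='rdkit_smiles', relation_col='relation')
#
# The result has one row per unique base SMILES string, with columns 'compound_id',
# 'base_rdkit_smiles', 'relation', the averaged value column, and (if label_actives) an 'active' flag.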
# ******************************************************************************************************************************************
def freq_table(dset_df, column, min_freq=1):
"""
    Generate a data frame tabulating the repeat frequencies of each unique value in the given column of dset_df.
Restrict it to values occurring at least min_freq times.
"""
vals = dset_df[column].values
uniq_vals, counts = np.unique(vals, return_counts=True)
uniq_df = pd.DataFrame({column: uniq_vals, 'Count': counts}).sort_values(by='Count', ascending=False)
uniq_df = uniq_df[uniq_df.Count >= min_freq]
return uniq_df
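# Illustrative example (not part of the original module), showing the shape of the output:
#
#   df = pd.DataFrame({'gene': ['a', 'a', 'b', 'c', 'c', 'c']})
#   freq_table(df, 'gene', min_freq=2)   # -> rows ('c', 3) and ('a', 2), sorted by descending Count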
# ******************************************************************************************************************************************
def labeled_freq_table(dset_df, columns, min_freq=1):
"""
Generate a frequency table in which additional columns are included. The first column in 'columns'
is assumed to be a unique ID; there should be a many-to-1 mapping from the ID to each of the additional
columns.
"""
id_col = columns[0]
freq_df = freq_table(dset_df, id_col, min_freq=min_freq)
uniq_ids = freq_df[id_col].values
addl_cols = columns[1:]
addl_vals = {colname: [] for colname in addl_cols}
uniq_df = dset_df.drop_duplicates(subset=columns)
for uniq_id in uniq_ids:
subset_df = uniq_df[uniq_df[id_col] == uniq_id]
if subset_df.shape[0] > 1:
raise Exception("Additional columns should be unique for ID %s" % uniq_id)
for colname in addl_cols:
addl_vals[colname].append(subset_df[colname].values[0])
for colname in addl_cols:
freq_df[colname] = addl_vals[colname]
return freq_df
# ******************************************************************************************************************************************
# The functions below are from <NAME>'s data_utils module.
# ******************************************************************************************************************************************
def filter_in_out_by_column_values (column, values, data, in_out):
"""Include rows only for given values in specified column.
column - column name.
values - list of acceptable values.
"""
if in_out == 'in':
data = data.loc[data[column].isin (values)]
else:
data = data.loc[~data[column].isin (values)]
return data
# ******************************************************************************************************************************************
def filter_in_by_column_values (column, values, data):
return filter_in_out_by_column_values (column, values, data, 'in')
# ******************************************************************************************************************************************
def filter_out_by_column_values (column, values, data):
return filter_in_out_by_column_values (column, values, data, 'out')
# ******************************************************************************************************************************************
def filter_out_comments (values, values_cs, data):
"""Remove rows that contain the text listed
values - list of values that are not case sensitive
values_cs - list of values that are case sensitive
"""
column = 'COMMENTS'
data['Remove'] = np.where (data[column].str.contains ('|'.join (values), case=False), 1, 0)
data['Remove'] = np.where (data[column].str.contains ('|'.join (values_cs), case=True), 1, data['Remove'])
data['Remove'] = np.where (data[column].str.contains ('nan', case=False), 0, data['Remove'])
data['Remove'] = np.where (data[column] == ' ', 0, data['Remove'])
data_removed = data[data.Remove == 1]
data = data[data.Remove != 1]
data_removed = data_removed['COMMENTS']
#print(data_removed)
del data['Remove']
# Results
#print ("")
#print('Remove results with comments indicating bad data')
#print("Dataframe size", data.shape[:])
#comments = pd.DataFrame(data['COMMENTS'].unique())
#comments = comments.sort_values(comments.columns[0])
#print (comments) # For the purpose of reviewing comments remaining
return data
# ******************************************************************************************************************************************
def get_rdkit_smiles_parent (data):
    """Strip the salts off the rdkit SMILES strings.
    First, loops through data and determines the base/parent SMILES string for each row,
    appending it to a list. Then adds the list as a new column 'rdkit_smiles_parent' in 'data'.
    """
    print ("")
    print ("Adding SMILES column 'rdkit_smiles_parent' with salts stripped...(may take a while)", flush=True)
i_max = data.shape[0]
rdkit_smiles_parent = []
for i in range (i_max):
smile = data['rdkit_smiles'].iloc[i]
if type (smile) is float:
split = ''
else:
split = base_smiles_from_smiles (smile)
rdkit_smiles_parent.append (split)
# 2. Add base smiles string (stripped smiles) to dataset
data['rdkit_smiles_parent'] = rdkit_smiles_parent
return data
# ******************************************************************************************************************************************
def average_and_remove_duplicates (column, tolerance, list_bad_duplicates, data, max_stdev = 100000, compound_id='CMPD_NUMBER',smiles_col='rdkit_smiles_parent'):
"""This while loop loops through until no'bad duplicates' are left.
column - column with the value of interest
tolerance - acceptable % difference between value and average
ie.: if "[(value - mean)/mean*100]>tolerance" then remove data row
note: The mean is recalculated on each loop through to make sure it isn't skewed by the 'bad duplicate' values"""
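    # Worked example of the tolerance check above (hypothetical numbers): for replicate values
    # 5.0 and 6.0 the mean is 5.5, so each row deviates by |5.0 - 5.5| / 5.5 * 100 ~= 9.1%;
    # with tolerance=5 both rows would be flagged as 'bad duplicates', with tolerance=10 both are kept.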
list_bad_duplicates = list_bad_duplicates
i = 0
bad_duplicates = 1
removed = []
removed = | pd.DataFrame(removed) | pandas.DataFrame |
from __future__ import division, print_function
import logging
from builtins import range
from datetime import datetime
import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from .timedelta import Timedelta
from featuretools import variable_types as vtypes
from featuretools.utils import is_string
from featuretools.utils.wrangle import (
_check_time_type,
_check_timedelta,
_dataframes_equal
)
logger = logging.getLogger('featuretools.entityset')
_numeric_types = vtypes.PandasTypes._pandas_numerics
_categorical_types = [vtypes.PandasTypes._categorical]
_datetime_types = vtypes.PandasTypes._pandas_datetimes
class Entity(object):
"""Represents an entity in a Entityset, and stores relevant metadata and data
An Entity is analogous to a table in a relational database
See Also:
:class:`.Relationship`, :class:`.Variable`, :class:`.EntitySet`
"""
id = None
variables = None
time_index = None
index = None
def __init__(self, id, df, entityset, variable_types=None,
index=None, time_index=None, secondary_time_index=None,
last_time_index=None, encoding=None, relationships=None,
already_sorted=False, created_index=None, verbose=False):
""" Create Entity
Args:
id (str): Id of Entity.
df (pd.DataFrame): Dataframe providing the data for the
entity.
entityset (EntitySet): Entityset for this Entity.
            variable_types (dict[str -> type or tuple[type, dict]]) : Optional mapping of
                variable id to variable type (:class:`.Variable`) with which to
                initialize the entity's store. A value may also be a tuple of
                (variable type, keyword-argument dict) for that type.
index (str): Name of id column in the dataframe.
time_index (str): Name of time column in the dataframe.
secondary_time_index (dict[str -> str]): Dictionary mapping columns
in the dataframe to the time index column they are associated with.
last_time_index (pd.Series): Time index of the last event for each
instance across all child entities.
            encoding (str, optional) : If None, will use 'ascii'. Another option is 'utf-8',
or any encoding supported by pandas.
relationships (list): List of known relationships to other entities,
used for inferring variable types.
"""
assert is_string(id), "Entity id must be a string"
assert len(df.columns) == len(set(df.columns)), "Duplicate column names"
self.data = {"df": df,
"last_time_index": last_time_index,
}
self.encoding = encoding
self._verbose = verbose
self.created_index = created_index
self.convert_all_variable_data(variable_types)
self.id = id
self.entityset = entityset
variable_types = variable_types or {}
self.index = index
self.time_index = time_index
self.secondary_time_index = secondary_time_index or {}
# make sure time index is actually in the columns
for ti, cols in self.secondary_time_index.items():
if ti not in cols:
cols.append(ti)
relationships = relationships or []
link_vars = [v.id for rel in relationships for v in [rel.parent_variable, rel.child_variable]
if v.entity.id == self.id]
inferred_variable_types = self.infer_variable_types(ignore=list(variable_types.keys()),
link_vars=link_vars)
for var_id, desired_type in variable_types.items():
if isinstance(desired_type, tuple):
desired_type = desired_type[0]
inferred_variable_types.update({var_id: desired_type})
self.variables = []
for v in inferred_variable_types:
# TODO document how vtype can be tuple
vtype = inferred_variable_types[v]
if isinstance(vtype, tuple):
# vtype is (ft.Variable, dict_of_kwargs)
_v = vtype[0](v, self, **vtype[1])
else:
_v = inferred_variable_types[v](v, self)
self.variables += [_v]
# do one last conversion of data once we've inferred
self.convert_all_variable_data(inferred_variable_types)
# make sure index is at the beginning
index_variable = [v for v in self.variables
if v.id == self.index][0]
self.variables = [index_variable] + [v for v in self.variables
if v.id != self.index]
self.update_data(df=self.df,
already_sorted=already_sorted,
recalculate_last_time_indexes=False)
def __repr__(self):
repr_out = u"Entity: {}\n".format(self.id)
repr_out += u" Variables:"
for v in self.variables:
repr_out += u"\n {} (dtype: {})".format(v.id, v.dtype)
shape = self.shape
repr_out += u"\n Shape:\n (Rows: {}, Columns: {})".format(
shape[0], shape[1])
# encode for python 2
if type(repr_out) != str:
repr_out = repr_out.encode("utf-8")
return repr_out
@property
def shape(self):
return self.df.shape
def __eq__(self, other, deep=False):
if self.index != other.index:
return False
if self.time_index != other.time_index:
return False
if self.secondary_time_index != other.secondary_time_index:
return False
if len(self.variables) != len(other.variables):
return False
for v in self.variables:
if v not in other.variables:
return False
if deep:
if self.last_time_index is None and other.last_time_index is not None:
return False
elif self.last_time_index is not None and other.last_time_index is None:
return False
elif self.last_time_index is not None and other.last_time_index is not None:
if not self.last_time_index.equals(other.last_time_index):
return False
if not _dataframes_equal(self.df, other.df):
return False
return True
def __sizeof__(self):
return sum([value.__sizeof__() for value in self.data.values()])
@property
def is_metadata(self):
return self.entityset.is_metadata
@property
def df(self):
return self.data["df"]
@df.setter
def df(self, _df):
self.data["df"] = _df
@property
def last_time_index(self):
return self.data["last_time_index"]
@last_time_index.setter
def last_time_index(self, lti):
self.data["last_time_index"] = lti
@property
def parents(self):
return [p.parent_entity.id for p in self.entityset.get_forward_relationships(self.id)]
def __hash__(self):
return id(self.id)
def __getitem__(self, variable_id):
return self._get_variable(variable_id)
def _get_variable(self, variable_id):
"""Get variable instance
Args:
variable_id (str) : Id of variable to get.
Returns:
:class:`.Variable` : Instance of variable.
Raises:
            KeyError : if no variable exists with the provided id
"""
for v in self.variables:
if v.id == variable_id:
return v
raise KeyError("Variable: %s not found in entity" % (variable_id))
@property
def variable_types(self):
return {v.id: type(v) for v in self.variables}
def convert_variable_type(self, variable_id, new_type,
convert_data=True,
**kwargs):
"""Convert variable in dataframe to different type
Args:
variable_id (str) : Id of variable to convert.
new_type (subclass of `Variable`) : Type of variable to convert to.
entityset (:class:`.BaseEntitySet`) : EntitySet associated with this entity.
convert_data (bool) : If True, convert underlying data in the EntitySet.
Raises:
RuntimeError : Raises if it cannot convert the underlying data
Examples:
>>> es["customer"].convert_variable_type("education_level", vtypes.Categorical, EntitySet)
True
"""
if convert_data:
# first, convert the underlying data (or at least try to)
self.convert_variable_data(
variable_id, new_type, **kwargs)
# replace the old variable with the new one, maintaining order
variable = self._get_variable(variable_id)
new_variable = new_type.create_from(variable)
self.variables[self.variables.index(variable)] = new_variable
def convert_all_variable_data(self, variable_types):
for var_id, desired_type in variable_types.items():
type_args = {}
if isinstance(desired_type, tuple):
# grab args before assigning type
type_args = desired_type[1]
desired_type = desired_type[0]
if var_id not in self.df.columns:
raise LookupError("Variable ID %s not in DataFrame" % (var_id))
current_type = self.df[var_id].dtype.name
if issubclass(desired_type, vtypes.Numeric) and \
current_type not in _numeric_types:
self.convert_variable_data(var_id, desired_type, **type_args)
if issubclass(desired_type, vtypes.Discrete) and \
current_type not in _categorical_types:
self.convert_variable_data(var_id, desired_type, **type_args)
if issubclass(desired_type, vtypes.Datetime) and \
current_type not in _datetime_types:
self.convert_variable_data(var_id, desired_type, **type_args)
def convert_variable_data(self, column_id, new_type, **kwargs):
"""
Convert variable in data set to different type
"""
df = self.df
if df[column_id].empty:
return
if new_type == vtypes.Numeric:
orig_nonnull = df[column_id].dropna().shape[0]
df[column_id] = | pd.to_numeric(df[column_id], errors='coerce') | pandas.to_numeric |
import nltk
import string
import re
import numpy as np
import pandas as pd
import pickle
import sys
import os
from collections import Counter
from numpy import linalg as LA
from sklearn import preprocessing
from sklearn import neighbors
from nltk.corpus import stopwords
from nltk.stem.porter import *
from nltk.tokenize import word_tokenize, sent_tokenize
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction import stop_words
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Ridge
import sklearn.linear_model as lm
from sklearn.metrics import accuracy_score, roc_auc_score, log_loss
from sklearn.metrics import mean_squared_log_error
from sklearn.metrics import fbeta_score, make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.pipeline import FeatureUnion
from sklearn.neural_network import MLPClassifier
from sklearn.decomposition import LatentDirichletAllocation
import Stemmer
from bs4 import BeautifulSoup
#import xgboost as xgb
import matplotlib.pyplot as plt
import seaborn as sns
def split_cat(text):
try: return text.split("/")
except: return ("Other", "Other", "Other")
train = pd.read_csv('train.tsv', sep='\t')
test = pd.read_csv('test.tsv', sep='\t')
sample_submission = | pd.read_csv("sample_submission.csv") | pandas.read_csv |
import tensorflow as tf
import numpy as np
import scipy.io as sio
import pandas as pd
import os
import csv
from feature_encoding import *
from keras.models import load_model
from keras.utils import to_categorical
import Efficient_CapsNet_sORF150
import Efficient_CapsNet_sORF250
import lightgbm as lgb
from sklearn.metrics import roc_auc_score
import sys
from optparse import OptionParser
##read Fasta sequence
def readFasta(file):
if os.path.exists(file) == False:
print('Error: "' + file + '" does not exist.')
sys.exit(1)
with open(file) as f:
records = f.read()
if re.search('>', records) == None:
print('The input file seems not in fasta format.')
sys.exit(1)
records = records.split('>')[1:]
myFasta = []
for fasta in records:
array = fasta.split('\n')
name, sequence = array[0].split()[0], re.sub('[^ARNDCQEGHILKMFPSTWYV-]', '-', ''.join(array[1:]).upper())
myFasta.append([name, sequence])
return myFasta
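# Illustrative note (not part of the original script): readFasta expects standard FASTA input, e.g.
#
#   >transcript_1
#   ATGGCTTAA...
#
# and returns a list of [name, sequence] pairs, e.g. [['transcript_1', 'ATGGCTTAA...']].
# The file name below is a hypothetical example.
#
#   fastas = readFasta('transcripts.fa')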
##extract sORF sequence
def get_sORF(fastas):
sORF_seq = []
for i in fastas:
name, seq = i[0], re.sub('-', '', i[1])
g = 0
if len(seq) > 303:
for j in range(len(seq)-2):
seg_start = seq[j:j+3]
if seg_start == 'ATG':
for k in range(j+3, len(seq)-2, 3):
seg_end = seq[k:k+3]
if seg_end == 'TAA':
sequence = seq[j:k+3]
if np.mod(len(sequence), 3) == 0 and 12 <= len(sequence) <= 303:
g+=1
sequence_name = '>' + name + '_sORF' + str(g)
sORF_seq.append([sequence_name, sequence])
break
if seg_end == 'TAG':
sequence = seq[j:k+3]
if np.mod(len(sequence), 3) == 0 and 12 <= len(sequence) <= 303:
g+=1
sequence_name = '>' + name + '_sORF' + str(g)
sORF_seq.append([sequence_name, sequence])
break
if seg_end == 'TGA':
sequence = seq[j:k+3]
if np.mod(len(sequence), 3) == 0 and 12 <= len(sequence) <= 303:
g+=1
sequence_name = '>' + name + '_sORF' + str(g)
sORF_seq.append([sequence_name, sequence])
break
elif len(seq) <= 303 and np.mod(len(seq), 3) != 0:
for j in range(len(seq)-2):
seg_start = seq[j:j+3]
if seg_start == 'ATG':
for k in range(j+3, len(seq)-2, 3):
seg_end = seq[k:k+3]
if seg_end == 'TAA':
sequence = seq[j:k+3]
if np.mod(len(sequence), 3) == 0 and 12 <= len(sequence) <= 303:
g+=1
sequence_name = '>' + name + '_sORF' + str(g)
sORF_seq.append([sequence_name, sequence])
break
if seg_end == 'TAG':
sequence = seq[j:k+3]
if np.mod(len(sequence), 3) == 0 and 12 <= len(sequence) <= 303:
g+=1
sequence_name = '>' + name + '_sORF' + str(g)
sORF_seq.append([sequence_name, sequence])
break
if seg_end == 'TGA':
sequence = seq[j:k+3]
if np.mod(len(sequence), 3) == 0 and 12 <= len(sequence) <= 303:
g+=1
sequence_name = '>' + name + '_sORF' + str(g)
sORF_seq.append([sequence_name, sequence])
break
elif seq[0:3] == 'ATG' and seq[len(seq)-3:len(seq)] == 'TAA' and np.mod(len(seq), 3) == 0 and 12 <= len(seq) <= 303:
sORF_seq.append([name, seq])
elif seq[0:3] == 'ATG' and seq[len(seq)-3:len(seq)] == 'TAG' and np.mod(len(seq), 3) == 0 and 12 <= len(seq) <= 303:
sORF_seq.append([name, seq])
elif seq[0:3] == 'ATG' and seq[len(seq)-3:len(seq)] == 'TGA' and np.mod(len(seq), 3) == 0 and 12 <= len(seq) <= 303:
sORF_seq.append([name, seq])
return sORF_seq
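# Illustrative sketch (not part of the original script): get_sORF scans each input sequence for
# open reading frames that start with ATG, end with TAA/TAG/TGA in the same frame, and whose
# length is a multiple of 3 between 12 and 303 nt. For a hypothetical 12-nt input:
#
#   sorfs = get_sORF([['tx1', 'ATGAAAAAATAA']])   # -> [['tx1', 'ATGAAAAAATAA']]
#
# Longer transcripts yield entries named '<name>_sORF1', '<name>_sORF2', ... for each candidate ORF.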
##get protein sequence
def get_protein(fastas):
protein_seq=[]
start_codon = 'ATG'
codon_table = {
'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',
'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',
'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',
'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',
'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',
'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',
'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',
'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',
'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',
'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',
'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',
'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',
'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',
'TAC': 'Y', 'TAT': 'Y', 'TAA': '', 'TAG': '',
'TGC': 'C', 'TGT': 'C', 'TGA': '', 'TGG': 'W'}
for i in fastas:
name, seq = i[0], re.sub('-', '', i[1])
start_site = re.search(start_codon, seq)
protein = ''
for site in range(start_site.start(), len(seq), 3):
protein = protein + codon_table[seq[site:site+3]]
protein_name = '>Micropeptide_' + name
protein_seq.append([protein_name, protein])
return protein_seq
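# Illustrative sketch (not part of the original script): get_protein translates each sORF from its
# first ATG using the codon table above, dropping the stop codon. For a hypothetical input:
#
#   get_protein([['tx1_sORF1', 'ATGAAAAAATAA']])   # -> [['>Micropeptide_tx1_sORF1', 'MKK']]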
##extract features
def feature_encode(datapath, dna_seq, protein_seq, s_type, d_type):
if s_type == 'H.sapiens':
if d_type == 'CDS':
c_m = pd.read_csv(datapath + 'human_cds_trainp_6mermean.csv', header=None, delimiter=',')
nc_m = pd.read_csv(datapath + 'human_cds_trainn_6mermean.csv', header=None, delimiter=',')
Tc_pos1 = pd.read_csv(datapath + 'human_cds_trainp_framed_3mer_1.csv', header=None, delimiter=',')
Tc_neg1 = pd.read_csv(datapath + 'human_cds_trainn_framed_3mer_1.csv', header=None, delimiter=',')
Tc_pos2 = pd.read_csv(datapath + 'human_cds_trainp_framed_3mer_2.csv', header=None, delimiter=',')
Tc_neg2 = pd.read_csv(datapath + 'human_cds_trainn_framed_3mer_2.csv', header=None, delimiter=',')
Tc_pos3 = pd.read_csv(datapath + 'human_cds_trainp_framed_3mer_3.csv', header=None, delimiter=',')
Tc_neg3 = pd.read_csv(datapath + 'human_cds_trainn_framed_3mer_3.csv', header=None, delimiter=',')
fea1_1 = np.array(ratio_ORFlength_hcds(dna_seq))
dna_fea = np.array(extract_DNAfeatures(dna_seq, c_m, nc_m, Tc_pos1, Tc_neg1, Tc_pos2, Tc_neg2, Tc_pos3, Tc_neg3, fea1_1))
elif d_type == 'non-CDS':
c_m = pd.read_csv(datapath + 'human_noncds_trainp_6mermean.csv', header=None, delimiter=',')
nc_m = pd.read_csv(datapath + 'human_noncds_trainn_6mermean.csv', header=None, delimiter=',')
Tc_pos1 = pd.read_csv(datapath + 'human_noncds_trainp_framed_3mer_1.csv', header=None, delimiter=',')
Tc_neg1 = pd.read_csv(datapath + 'human_noncds_trainn_framed_3mer_1.csv', header=None, delimiter=',')
Tc_pos2 = pd.read_csv(datapath + 'human_noncds_trainp_framed_3mer_2.csv', header=None, delimiter=',')
Tc_neg2 = pd.read_csv(datapath + 'human_noncds_trainn_framed_3mer_2.csv', header=None, delimiter=',')
Tc_pos3 = pd.read_csv(datapath + 'human_noncds_trainp_framed_3mer_3.csv', header=None, delimiter=',')
Tc_neg3 = pd.read_csv(datapath + 'human_noncds_trainn_framed_3mer_3.csv', header=None, delimiter=',')
fea1_1 = np.array(ratio_ORFlength_hnoncds(dna_seq))
dna_fea = np.array(extract_DNAfeatures(dna_seq, c_m, nc_m, Tc_pos1, Tc_neg1, Tc_pos2, Tc_neg2, Tc_pos3, Tc_neg3, fea1_1))
else:
print("Type error")
elif s_type == 'M.musculus':
if d_type == 'CDS':
c_m = pd.read_csv(datapath + 'mouse_cds_trainp_6mermean.csv', header=None, delimiter=',')
nc_m = pd.read_csv(datapath + 'mouse_cds_trainn_6mermean.csv', header=None, delimiter=',')
Tc_pos1 = pd.read_csv(datapath + 'mouse_cds_trainp_framed_3mer_1.csv', header=None, delimiter=',')
Tc_neg1 = pd.read_csv(datapath + 'mouse_cds_trainn_framed_3mer_1.csv', header=None, delimiter=',')
Tc_pos2 = pd.read_csv(datapath + 'mouse_cds_trainp_framed_3mer_2.csv', header=None, delimiter=',')
Tc_neg2 = pd.read_csv(datapath + 'mouse_cds_trainn_framed_3mer_2.csv', header=None, delimiter=',')
Tc_pos3 = pd.read_csv(datapath + 'mouse_cds_trainp_framed_3mer_3.csv', header=None, delimiter=',')
Tc_neg3 = pd.read_csv(datapath + 'mouse_cds_trainn_framed_3mer_3.csv', header=None, delimiter=',')
fea1_1 = np.array(ratio_ORFlength_mcds(dna_seq))
dna_fea = np.array(extract_DNAfeatures(dna_seq, c_m, nc_m, Tc_pos1, Tc_neg1, Tc_pos2, Tc_neg2, Tc_pos3, Tc_neg3, fea1_1))
elif d_type == 'non-CDS':
c_m = pd.read_csv(datapath + 'mouse_noncds_trainp_6mermean.csv', header=None, delimiter=',')
nc_m = | pd.read_csv(datapath + 'mouse_noncds_trainn_6mermean.csv', header=None, delimiter=',') | pandas.read_csv |
#!/usr/bin/env python
import sys, time
import numpy as np
from io import StringIO
import pickle as pickle
from pandas import DataFrame
from pandas import concat
from pandas import read_pickle
from pandas import cut
from pandas import concat
from sklearn.externals import joblib
from sklearn.cross_validation import cross_val_score
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from djeval import *
CROSS_VALIDATION_N = 150000
MIN_SAMPLES_LEAF = 300
MIN_SAMPLES_SPLIT = 1000
FITTING_N = 50000
n_estimators = 200
cv_groups = 3
n_jobs = -1
# For when we were messing around with blundergroups:
#
# so that we can test the idea that dataset size being equal,
# we make better predictions with data specifically in that blundergroup
#
# CROSS_VALIDATION_N = 7500
# debugging 'Cannot allocate memory'
n_jobs = 1
if False:
inflation = 4
CROSS_VALIDATION_N = inflation * CROSS_VALIDATION_N
MIN_SAMPLES_LEAF = inflation * MIN_SAMPLES_LEAF
MIN_SAMPLES_SPLIT = inflation * MIN_SAMPLES_SPLIT
FITTING_N = inflation * FITTING_N
just_testing = False
if just_testing:
CROSS_VALIDATION_N = 1500
n_estimators = 2
n_jobs = -1
blunder_cv_results = []
def sample_df(df, n_to_sample):
if n_to_sample >= len(df.index.values):
return df
row_indexes = np.random.choice(df.index.values, n_to_sample, replace=False)
return df.ix[row_indexes]
def group_scorer(estimator, X, y):
pred_y = estimator.predict(X)
msg("GROUPED SCORES FOR a CV GROUP:")
dfx = DataFrame(X, columns=features_to_use)
dfx['pred_abserror'] = abs(pred_y - y)
blunder_cvgroups, blunder_cvbins = cut(dfx['movergain'], blunder_cats, retbins=True)
blunder_cvgrouped = dfx.groupby(blunder_cvgroups)['pred_abserror'].agg({'lad': np.mean})
blunder_cv_results.append(blunder_cvgrouped)
msg("scores: %s" % str(blunder_cvgrouped))
return mean_absolute_error(y, pred_y)
def crossval_rfr(df):
sampled_df = sample_df(df, CROSS_VALIDATION_N)
sample_size = len(sampled_df)
mss = max([sample_size / 150, 100])
msl = max([sample_size / 450, 30])
# rfr_here = RandomForestRegressor(n_estimators=n_estimators, n_jobs=n_jobs, min_samples_leaf=msl, min_samples_split=mss, verbose=1)
rfr_here = RandomForestRegressor(n_estimators=n_estimators, n_jobs=n_jobs, min_samples_leaf=MIN_SAMPLES_LEAF, min_samples_split=MIN_SAMPLES_SPLIT, verbose=1)
crossval_X = sampled_df[features_to_use]
crossval_y = sampled_df['elo']
crossval_weights = sampled_df['weight']
msg("Starting cross validation. %i records" % sample_size)
begin_time = time.time()
cvs = cross_val_score(rfr_here, crossval_X, crossval_y, cv=cv_groups, n_jobs=n_jobs, scoring='mean_absolute_error', fit_params={'sample_weight': crossval_weights})
msg("Cross validation took %f seconds with %i threads, %i records, %i estimators and %i CV groups" % ((time.time() - begin_time), n_jobs, len(crossval_X), n_estimators, cv_groups))
msg("Results: %f, %s" % (np.mean(cvs), str(cvs)))
return np.mean(cvs)
msg("Hi, reading moves.")
moves_df = | read_pickle(sys.argv[1]) | pandas.read_pickle |
import config_my
import requests
from bs4 import BeautifulSoup
import pandas as pd
# from tabulate import tabulate
import telebot
print(config_my.token)
bot = telebot.TeleBot(config_my.token)
pict = [
'https://avatars.mds.yandex.net/get-pdb/2864819/2091b635-1a05-4a81-9f4f-9cdd46cb9be0/s1200',
'https://avatars.mds.yandex.net/get-zen_doc/196516/pub_5d65e93efe289100adb4c54e_5d66099378125e00ac052d00/scale_1200',
'https://avatars.mds.yandex.net/get-pdb/1683100/d71b5f09-b408-42ce-b480-cbcd0d340efe/s1200?webp=false',
'https://avatars.mds.yandex.net/get-zen_doc/1899089/pub_5d9b5f2f35c8d800ae71fb5a_5d9b60a98f011100b48eb4fb/scale_1200',
'https://avatars.mds.yandex.net/get-zen_doc/196516/pub_5d65e93efe289100adb4c54e_5d66099378125e00ac052d00/scale_1200',
'http://ysia.ru/wp-content/uploads/2018/01/1-19.jpg'
]
def stat(tag=0):
url = 'https://www.worldometers.info/coronavirus/'
website = requests.get(url).text
soup = BeautifulSoup(website, 'lxml')
table = soup.find_all('table')[tag]
rows = table.find_all('tr')
fields_list = []
for i in range(9):
col = list()
col.append(rows[0].find_all('th')[i + 1].get_text().strip())
for row in rows[1:224]:
r = row.find_all('td')
col.append(r[i + 1].get_text().strip())
fields_list.append(col)
d = dict()
for i in range(9):
d[fields_list[i][0]] = fields_list[i][1:]
df = | pd.DataFrame(d) | pandas.DataFrame |
import pymc3 as pm
import pandas as pd
from covid.models.generative import GenerativeModel
from covid.data import summarize_inference_data
url = '../../data/covid19za_provincial_cumulative_timeline_confirmed.csv'
states_cases = pd.read_csv(url, parse_dates=['date'], dayfirst=True, index_col=0)
states_cases.tail()
url = '../../data/covid19za_timeline_testing.csv'
states_tests = pd.read_csv(url, parse_dates=['date'], dayfirst=True, index_col=0)
states_tests.tail()
cases = pd.Series(states_cases['total'], index=states_cases.index, name='cases')
casezero = states_cases.index[0]
caselast = states_cases.index[-1]
idx = pd.date_range(casezero, caselast)
tests_all = pd.Series(states_tests['cumulative_tests'], index=states_tests.index, name='tests')
tests = tests_all.loc[casezero:caselast]
combined_model = | pd.concat([cases, tests], axis=1) | pandas.concat |
import numpy as np
import pandas as pd
import torch
import torchvision
from am_utils.utils import walk_dir
from torch.utils.data import DataLoader
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from tqdm import tqdm
from ..dataset.dataset_object_inference import DatasetObjectInference, DatasetObjectInferenceMosaic
from ..transforms.bbox import get_test_transform
from ..utils.utils import collate_fn
from ..utils.utils import remove_overlapping_boxes, get_boxes_above_threshold
def get_df_of_file_list(input_dir, id_name='image_id'):
"""
List files in given folder and generate a dataframe for the data loader.
Parameters
----------
input_dir : str
Input directory
id_name : str, optional
Column name to specify image ID.
Default is 'image_id'
Returns
-------
pd.DataFrame
Dataframe with a list of input files.
"""
files = walk_dir(input_dir)
files = [fn[len(input_dir) + 1:] for fn in files]
df = | pd.DataFrame({id_name: files}) | pandas.DataFrame |
from django.http import JsonResponse
import requests
import asyncio
import aiohttp
import numpy as np
import pandas as pd
from pandas import json_normalize
import json
from functools import reduce
import unidecode
from random import randint
from time import sleep
import traceback
import sys
import random
import logging
def get_spotify_music_profile(request):
spotifyAPI = SpotifyAPI(request)
try:
music_profile = spotifyAPI.get_music_profile()
return music_profile
except Exception as e:
# traceback.format_exc()
print('GLOBAL EXCEPTION - BAD. RETURNING ERROR TO FRONT END')
logging.exception("music profile refresh exception")
error_report = {
'error': {
'message': str(e),
'status': 500,
}
}
return error_report
class SpotifyAPI:
REQUEST_EXCEPTION_MSG = "Spotify API Request Exception while fetching "
SAVE_PROFILE_AS_CSV = False
USER_PLAYLISTS_ONLY = True # don't change unless you want playlists a user follows to also be included
def __init__(self, access_token):
self.header = {'Authorization' : "Bearer "+access_token}
self.user_id = self.fetch_user_id()
self.artist_columns = []
self.track_columns = []
self.artists_dataframes = []
self.tracks_dataframes = []
def get_music_profile(self):
asyncio.run(self.collect_artists_and_tracks_dataframes())
print("converting dataframes to JSON...")
print(f'returning { self.artists_df.shape[0] } artists and { self.tracks_df.shape[0] } tracks')
if self.SAVE_PROFILE_AS_CSV:
self.artists_df.to_csv('artists_df.csv')
self.tracks_df.to_csv('tracks_df.csv')
artists_json = self.get_artists_json(self.artists_df)
tracks_json = self.get_tracks_json(self.tracks_df)
music_profile = {
"artists" : artists_json,
"tracks" : tracks_json,
}
return music_profile
def get_artists_json(self, artists_df):
return artists_df.to_json(orient='records')
def get_tracks_json(self, tracks_df):
return tracks_df.to_json(orient='records')
async def collect_artists_and_tracks_dataframes(self):
# fetch artists and tracks together, due to how the Spotify API returns both
print("collect_artists_and_tracks_dataframes()...")
tasks = [self.fetch_top_artists("long_term"), self.fetch_top_artists("medium_term"), self.fetch_top_artists("short_term")
, self.fetch_top_tracks("long_term"), self.fetch_top_tracks("medium_term"), self.fetch_top_tracks("short_term")
, self.fetch_followed_artists(), self.fetch_saved_tracks(), self.get_all_playlists()]
await asyncio.gather(*tasks)
print("initial tasks (fetches) have finishing gathering..")
print("initiating get_artists_master_df(), where full artist objects will be fetched..")
self.artists_df = await self.get_artists_master_df()
print("finished fetching full objects.")
self.tracks_df = self.get_tracks_master_df()
async def get_artists_master_df(self):
if self.artists_dataframes == []:
return pd.DataFrame()
artists_df = None
if len(self.artists_dataframes) > 1:
artists_df = reduce(lambda left, right: | pd.merge(left, right, how="outer") | pandas.merge |
"""
@author: hugonnet
derive all values present in the text of the manuscript: accelerations, SLR contributions, etc..
"""
import os, sys
import numpy as np
import pandas as pd
from glob import glob
import pyddem.fit_tools as ft
import pyddem.tdem_tools as tt
reg_dir = '/home/atom/ongoing/work_worldwide/vol/final'
fn_tarea = '/home/atom/data/inventory_products/RGI/tarea_zemp.csv'
list_fn_reg= [os.path.join(reg_dir,'dh_'+str(i).zfill(2)+'_rgi60_int_base_reg.csv') for i in [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19]]
periods = ['2000-01-01_2005-01-01','2005-01-01_2010-01-01','2010-01-01_2015-01-01','2015-01-01_2020-01-01','2000-01-01_2020-01-01']
tlims = [(np.datetime64('2000-01-01'),np.datetime64('2005-01-01')),(np.datetime64('2005-01-01'),np.datetime64('2010-01-01')),(np.datetime64('2010-01-01'),np.datetime64('2015-01-01')),(np.datetime64('2015-01-01'),np.datetime64('2020-01-01')),(np.datetime64('2000-01-01'),np.datetime64('2020-01-01'))]
list_df = []
for fn_reg in list_fn_reg:
for period in periods:
df_tmp = tt.aggregate_all_to_period(pd.read_csv(fn_reg),[tlims[periods.index(period)]],fn_tarea=fn_tarea,frac_area=1)
list_df.append(df_tmp)
df = pd.concat(list_df)
list_df_all = []
for period in periods:
df_p = df[df.period == period]
df_global = tt.aggregate_indep_regions_rates(df_p)
df_global['reg']='global'
df_global['period'] = period
df_noperiph = tt.aggregate_indep_regions_rates(df_p[~df_p.reg.isin([5, 19])])
df_noperiph['reg']='global_noperiph'
df_noperiph['period'] =period
df_full_p = pd.concat([df_p,df_noperiph,df_global])
list_df_all.append(df_full_p)
df_all = pd.concat(list_df_all)
df_g = df_all[df_all.reg=='global']
df_np = df_all[df_all.reg=='global_noperiph']
#CONTRIBUTION TO SLR
# from <NAME>: AVISO-based sea-level rise trend for 2000.0-2020.0 and 1-sigma errors
gmsl_trend = 3.56
gmsl_trend_err = 0.2
gmsl_acc = 0.15
gmsl_acc_err = 0.04
glac_trend = df_g[df_g.period == '2000-01-01_2020-01-01'].dmdt.values[0]/361.8
glac_trend_err = df_g[df_g.period == '2000-01-01_2020-01-01'].err_dmdt.values[0]/361.8
print('Glacier mass loss totalled '+'{:.2f}'.format(df_g[df_g.period == '2000-01-01_2020-01-01'].dmdt.values[0])+' ± '+'{:.2f}'.format(2*df_g[df_g.period == '2000-01-01_2020-01-01'].err_dmdt.values[0])+ ' Gt yr-1')
print('Glacier mass loss totalled '+'{:.3f}'.format(glac_trend)+' ± '+'{:.3f}'.format(2*glac_trend_err)+ ' mm of sea-level rise')
contr_trend = -glac_trend/gmsl_trend*100
contr_trend_err = -glac_trend/gmsl_trend*np.sqrt((gmsl_trend_err/gmsl_trend)**2+(glac_trend_err/glac_trend)**2)*100
print('Glacier contribution to SLR is '+'{:.2f}'.format(contr_trend)+' % ± '+'{:.2f}'.format(2*contr_trend_err)+' %')
#GLACIER ACCELERATION
beta1_t, beta0, incert_slope, _, _ = ft.wls_matrix(x=np.arange(0,16,5),y=df_g.dhdt.values[:-1],w=1/df_g.err_dhdt.values[:-1]**2)
print('Global thinning acceleration is '+'{:.5f}'.format(beta1_t)+' ± '+'{:.5f}'.format(2*incert_slope)+ ' m yr-2')
beta1, beta0, incert_slope, _, _ = ft.wls_matrix(x=np.array([0,5,10,15]),y=df_np.dhdt.values[:-1],w=1/df_np.err_dhdt.values[:-1]**2)
print('Global excl. GRL and ANT thinning acceleration is '+'{:.5f}'.format(beta1)+' ± '+'{:.5f}'.format(2*incert_slope)+ ' m yr-2')
beta1_g, beta0, incert_slope_g, _, _ = ft.wls_matrix(x=np.arange(0,16,5),y=df_g.dmdt.values[:-1],w=1/df_g.err_dmdt.values[:-1]**2)
print('Global mass loss acceleration is '+'{:.5f}'.format(beta1_g)+' ± '+'{:.5f}'.format(2*incert_slope_g)+ ' Gt yr-2')
beta1, beta0, incert_slope, _, _ = ft.wls_matrix(x=np.array([0,5,10,15]),y=df_np.dmdt.values[:-1],w=1/df_np.err_dmdt.values[:-1]**2)
print('Global excl. GRL and ANT mass loss acceleration is '+'{:.5f}'.format(beta1)+' ± '+'{:.5f}'.format(2*incert_slope)+ ' Gt yr-2')
#CONTRIBUTION TO ACCELERATION OF SLR
glac_acc = -beta1_g/361.8
glac_acc_err = incert_slope_g/361.8
contr_acc = glac_acc/gmsl_acc*100
# error is not symmetrical; the error on the acceleration of SLR is about 20 times larger than the glacier error
rss_gmsl_acc_err = np.sqrt(glac_acc_err**2+gmsl_acc_err**2)
upper_bound = glac_acc/(gmsl_acc-2*rss_gmsl_acc_err)*100
lower_bound = glac_acc/(gmsl_acc+2*rss_gmsl_acc_err)*100
print('Glacier contribution to acceleration of SLR is '+'{:.2f}'.format(contr_acc)+' % with 95% confidence interval of '+'{:.1f}'.format(lower_bound)+'-'+'{:.1f}'.format(upper_bound)+' %')
#YEARLY VALUES
periods = ['20'+str(i).zfill(2)+'-01-01_'+'20'+str(i+1).zfill(2)+'-01-01' for i in np.arange(0,20,1)]
tlims = [(np.datetime64('20'+str(i).zfill(2)+'-01-01'),np.datetime64('20'+str(i+1).zfill(2)+'-01-01')) for i in np.arange(0,20,1)]
list_df_yrly = []
for fn_reg in list_fn_reg:
for period in periods:
df_tmp = tt.aggregate_all_to_period(pd.read_csv(fn_reg),[tlims[periods.index(period)]],fn_tarea=fn_tarea,frac_area=1)
list_df_yrly.append(df_tmp)
df_yrly = pd.concat(list_df_yrly)
list_df_all_yrly = []
for period in periods:
df_p = df_yrly[df_yrly.period == period]
df_global = tt.aggregate_indep_regions_rates(df_p)
df_global['reg']='global'
df_global['period'] = period
df_noperiph = tt.aggregate_indep_regions_rates(df_p[~df_p.reg.isin([5, 19])])
df_noperiph['reg']='global_noperiph'
df_noperiph['period'] =period
df_full_p = pd.concat([df_p,df_noperiph,df_global])
list_df_all_yrly.append(df_full_p)
df_all_yrly = pd.concat(list_df_all_yrly)
dhdt_2000_global = df_all_yrly[np.logical_and(df_all_yrly.period=='2000-01-01_2001-01-01',df_all_yrly.reg=='global_noperiph')].dhdt.values[0]
dhdt_2000_global_err = df_all_yrly[np.logical_and(df_all_yrly.period=='2000-01-01_2001-01-01',df_all_yrly.reg=='global_noperiph')].err_dhdt.values[0]
dhdt_2019_global = df_all_yrly[np.logical_and(df_all_yrly.period=='2019-01-01_2020-01-01',df_all_yrly.reg=='global_noperiph')].dhdt.values[0]
dhdt_2019_global_err = df_all_yrly[np.logical_and(df_all_yrly.period=='2019-01-01_2020-01-01',df_all_yrly.reg=='global_noperiph')].err_dhdt.values[0]
print('Global excl. GRL and ANT thinning rates in 2000: '+'{:.3f}'.format(dhdt_2000_global)+' ± '+'{:.3f}'.format(2*dhdt_2000_global_err)+' m yr-1')
print('Global excl. GRL and ANT thinning rates in 2019: '+'{:.3f}'.format(dhdt_2019_global)+' ± '+'{:.3f}'.format(2*dhdt_2019_global_err)+' m yr-1')
# REGIONAL PERCENTAGES
df_tot = df_all[df_all.period == '2000-01-01_2020-01-01']
list_cont_perc = []
for i in range(19):
cont = df_tot[df_tot.reg==i+1].dmdt.values[0]/df_tot[df_tot.reg=='global'].dmdt.values[0]*100
list_cont_perc.append(cont)
print('Contribution of Alaska: '+'{:.1f}'.format(list_cont_perc[0])+' %')
print('Contribution of Greenland Periphery: '+'{:.1f}'.format(list_cont_perc[4])+' %')
print('Contribution of Arctic Canada North: '+'{:.1f}'.format(list_cont_perc[2])+' %')
print('Contribution of Arctic Canada South: '+'{:.1f}'.format(list_cont_perc[3])+' %')
print('Contribution of Antarctic Periphery: '+'{:.1f}'.format(list_cont_perc[18])+' %')
print('Contribution of High Moutain Asia: '+'{:.1f}'.format(list_cont_perc[12]+list_cont_perc[13]+list_cont_perc[14])+' %')
print('Contribution of Southern Andes: '+'{:.1f}'.format(list_cont_perc[16])+' %')
#separate contribution from North Greenland and South: done manually
print('Iceland specific rate: '+'{:.2f}'.format(df_tot[df_tot.reg==6].dmdtda.values[0])+' ± '+'{:.2f}'.format(2*df_tot[df_tot.reg==6].err_dmdtda.values[0])+' m w.e yr-1')
df_nonpolar = tt.aggregate_indep_regions_rates(df_tot[df_tot.reg.isin([10, 11, 12, 16, 17, 18])])
print('Non-polar specific rate: '+'{:.2f}'.format(df_nonpolar.dmdtda.values[0])+' ± '+'{:.2f}'.format(2*df_nonpolar.err_dmdtda.values[0])+' m w.e yr-1')
#for HMA, account for correlated error all at once:
fn_hma=os.path.join(reg_dir,'dh_13_14_15_rgi60_int_base_reg.csv')
df_hma = tt.aggregate_all_to_period(pd.read_csv(fn_hma),[(np.datetime64('2000-01-01'),np.datetime64('2020-01-01'))],fn_tarea=fn_tarea,frac_area=1)
print('HMA specific rate: '+'{:.2f}'.format(df_hma.dmdtda.values[0])+' ± '+'{:.2f}'.format(2*df_hma.err_dmdtda.values[0])+' m w.e yr-1')
print('Antarctic and Subantartic specific rate: '+'{:.2f}'.format(df_tot[df_tot.reg==19].dmdtda.values[0])+' ± '+'{:.2f}'.format(2*df_tot[df_tot.reg==19].err_dmdtda.values[0])+' m w.e yr-1')
#corresponding period for comparison to Shean et al., 2019
df_hma = tt.aggregate_all_to_period(pd.read_csv(fn_hma),[(np.datetime64('2000-01-01'),np.datetime64('2018-01-01'))],fn_tarea=fn_tarea,frac_area=1)
print('Shean comparison: HMA specific rate: '+'{:.2f}'.format(df_hma.dmdtda.values[0])+' ± '+'{:.2f}'.format(2*df_hma.err_dmdtda.values[0])+' m w.e yr-1')
#corresponding period for comparison to Braun et al., 2019
df_sa = tt.aggregate_all_to_period(pd.read_csv(list_fn_reg[16]),[(np.datetime64('2000-01-01'),np.datetime64('2013-01-01'))],fn_tarea=fn_tarea,frac_area=1)
print('Braun comparison: Southern Andes specific rate: '+'{:.2f}'.format(df_sa.dmdtda.values[0])+' ± '+'{:.2f}'.format(2*df_sa.err_dmdtda.values[0])+' m w.e yr-1')
df_trp = tt.aggregate_all_to_period( | pd.read_csv(list_fn_reg[15]) | pandas.read_csv |
# Copyright (c) 2018 The Regents of the University of Michigan
# and the University of Pennsylvania
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Utility functions for performing cross-validation for model training/testing.
"""
from morf.utils.log import set_logger_handlers, execute_and_log_output
from morf.utils.docker import load_docker_image, make_docker_run_command
from morf.utils.config import MorfJobConfig
from morf.utils import fetch_complete_courses, fetch_sessions, download_train_test_data, initialize_input_output_dirs, make_feature_csv_name, make_label_csv_name, clear_s3_subdirectory, upload_file_to_s3, download_from_s3, initialize_labels, aggregate_session_input_data
from morf.utils.s3interface import make_s3_key_path
from morf.utils.api_utils import collect_course_cv_results
from multiprocessing import Pool
import logging
import tempfile
import pandas as pd
import os
import numpy as np
from sklearn.model_selection import StratifiedKFold
module_logger = logging.getLogger(__name__)
CONFIG_FILENAME = "config.properties"
mode = "cv"
def make_folds(job_config, raw_data_bucket, course, k, label_type, raw_data_dir="morf-data/"):
"""
Utility function to be called by create_course_folds for creating the folds for a specific course.
:return:
"""
logger = set_logger_handlers(module_logger, job_config)
user_id_col = "userID"
label_col = "label_value"
logger.info("creating cross-validation folds for course {}".format(course))
with tempfile.TemporaryDirectory(dir=job_config.local_working_directory) as working_dir:
input_dir, output_dir = initialize_input_output_dirs(working_dir)
# download data for each session
for session in fetch_sessions(job_config, raw_data_bucket, data_dir=raw_data_dir, course=course,
fetch_all_sessions=True):
# get the session feature and label data
download_train_test_data(job_config, raw_data_bucket, raw_data_dir, course, session, input_dir,
label_type=label_type)
# merge features to ensure splits are correct
feat_csv_path = aggregate_session_input_data("features", os.path.join(input_dir, course))
label_csv_path = aggregate_session_input_data("labels", os.path.join(input_dir, course))
feat_df = pd.read_csv(feat_csv_path, dtype=object)
label_df = pd.read_csv(label_csv_path, dtype=object)
feat_label_df = pd.merge(feat_df, label_df, on=user_id_col)
if feat_df.shape[0] != label_df.shape[0]:
logger.error(
"number of observations in extracted features and labels do not match for course {}; features contains {} and labels contains {} observations".format(
course, feat_df.shape[0], label_df.shape[0]))
# create the folds
logger.info("creating cv splits with k = {} course {} session {}".format(k, course, session))
skf = StratifiedKFold(n_splits=k, shuffle=True)
folds = skf.split(np.zeros(feat_label_df.shape[0]), feat_label_df.label_value)
for fold_num, train_test_indices in enumerate(folds, 1): # write each fold train/test data to csv and push to s3
train_index, test_index = train_test_indices
train_df, test_df = feat_label_df.loc[train_index,].drop(label_col, axis=1), feat_label_df.loc[
test_index,].drop(label_col, axis=1)
train_df_name = os.path.join(working_dir, make_feature_csv_name(course, fold_num, "train"))
test_df_name = os.path.join(working_dir, make_feature_csv_name(course, fold_num, "test"))
train_df.to_csv(train_df_name, index=False)
test_df.to_csv(test_df_name, index=False)
# upload to s3
try:
train_key = make_s3_key_path(job_config, course, os.path.basename(train_df_name))
upload_file_to_s3(train_df_name, job_config.proc_data_bucket, train_key, job_config, remove_on_success=True)
test_key = make_s3_key_path(job_config, course, os.path.basename(test_df_name))
upload_file_to_s3(test_df_name, job_config.proc_data_bucket, test_key, job_config, remove_on_success=True)
except Exception as e:
logger.warning("exception occurred while uploading cv results: {}".format(e))
return
def create_course_folds(label_type, k = 5, multithread = True):
"""
From extract and extract-holdout data, create k randomized folds, pooling data by course (across sessions) and archive results to s3.
:param label_type: type of outcome label to use.
:param k: number of folds.
:param multithread: logical indicating whether multiple cores should be used (if available)
:return:
"""
job_config = MorfJobConfig(CONFIG_FILENAME)
job_config.update_mode(mode)
logger = set_logger_handlers(module_logger, job_config)
# clear any preexisting data for this user/job/mode
clear_s3_subdirectory(job_config)
if multithread:
num_cores = job_config.max_num_cores
else:
num_cores = 1
logger.info("creating cross-validation folds")
for raw_data_bucket in job_config.raw_data_buckets:
reslist = []
with Pool(num_cores) as pool:
for course in fetch_complete_courses(job_config, raw_data_bucket):
poolres = pool.apply_async(make_folds, [job_config, raw_data_bucket, course, k, label_type])
reslist.append(poolres)
pool.close()
pool.join()
for res in reslist:
logger.info(res.get())
return
def create_session_folds(label_type, k = 5, multithread = True, raw_data_dir="morf-data/"):
"""
From extract and extract-holdout data, create k randomized folds for each session and archive results to s3.
:param label_type: type of outcome label to use.
:param k: number of folds.
:param multithread: logical indicating whether multiple cores should be used (if available)
:param raw_data_dir: name of subfolder in s3 buckets containing raw data.
:return:
"""
user_id_col = "userID"
label_col = "label_value"
job_config = MorfJobConfig(CONFIG_FILENAME)
job_config.update_mode(mode)
logger = set_logger_handlers(module_logger, job_config)
# clear any preexisting data for this user/job/mode
clear_s3_subdirectory(job_config)
if multithread:
num_cores = job_config.max_num_cores
else:
num_cores = 1
logger.info("creating cross-validation folds")
with Pool(num_cores) as pool:
for raw_data_bucket in job_config.raw_data_buckets:
for course in fetch_complete_courses(job_config, raw_data_bucket):
for session in fetch_sessions(job_config, raw_data_bucket, data_dir=raw_data_dir, course=course, fetch_all_sessions=True):
with tempfile.TemporaryDirectory(dir=job_config.local_working_directory) as working_dir:
# todo: call make_folds() here via apply_async(); currently this is not parallelized!
input_dir, output_dir = initialize_input_output_dirs(working_dir)
# get the session feature and label data
download_train_test_data(job_config, raw_data_bucket, raw_data_dir, course, session, input_dir, label_type=label_type)
feature_file = os.path.join(input_dir, course, session, make_feature_csv_name(course, session))
label_file = os.path.join(input_dir, course, session, make_label_csv_name(course, session))
feat_df = pd.read_csv(feature_file, dtype=object)
label_df = pd.read_csv(label_file, dtype=object)
# merge features to ensure splits are correct
feat_label_df = | pd.merge(feat_df, label_df, on=user_id_col) | pandas.merge |
from pymoab import core, types
from pymoab.rng import Range
import dagmc_stats.DagmcFile as df
import dagmc_stats.DagmcQuery as dq
import pandas as pd
import numpy as np
import warnings
import pytest
test_env = {'three_vols': 'tests/3vols.h5m',
'single_cube': 'tests/single-cube.h5m', 'pyramid': 'tests/pyramid.h5m'}
def test_pandas_data_frame():
"""Tests the initialization of pandas data frames
"""
single_cube = df.DagmcFile(test_env['single_cube'])
single_cube_query = dq.DagmcQuery(single_cube)
exp_vert_data = pd.DataFrame()
assert(single_cube_query._vert_data.equals(exp_vert_data))
exp_tri_data = pd.DataFrame()
assert(single_cube_query._tri_data.equals(exp_tri_data))
exp_surf_data = | pd.DataFrame() | pandas.DataFrame |
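As a standalone illustration of the emptiness check used in the test above (pure pandas, no pymoab needed): two freshly constructed empty DataFrames compare equal with .equals(), and pandas.testing gives a more informative failure message.

import pandas as pd
import pandas.testing as pdt

empty_a = pd.DataFrame()
empty_b = pd.DataFrame()
assert empty_a.equals(empty_b)            # the style of check used in the test above
pdt.assert_frame_equal(empty_a, empty_b)  # raises with a detailed diff on mismatch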
import numpy as np
import pandas as pd
# Based on https://towardsdatascience.com/time-series-of-price-anomaly-detection-13586cd5ff46
def get_distance_by_point(data, model):
"""
Calculates the distance from each point in the data to its nearest centroid
:param data: the data points
:param model: the kmeans model
"""
distance = | pd.Series() | pandas.Series |
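The helper above is cut off mid-body. One possible way to complete it, assuming `model` is a fitted scikit-learn KMeans (whose transform() returns the distance to every centroid), is sketched below together with the usual anomaly-flagging step from the referenced article; this is not necessarily the original implementation.

import numpy as np
import pandas as pd
from sklearn.cluster import KMeans

def get_distance_by_point(data, model):
    # distance from each row of `data` to its nearest fitted centroid
    all_dists = model.transform(data)          # shape (n_samples, n_clusters)
    return pd.Series(all_dists.min(axis=1), index=data.index)

prices = pd.DataFrame(np.random.rand(200, 2), columns=["price", "volume"])
km = KMeans(n_clusters=3, n_init=10, random_state=0).fit(prices)
dist = get_distance_by_point(prices, km)
outliers = dist[dist > dist.quantile(0.95)]    # flag the 5% furthest points as anomalies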
import os
import csv
import requests
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from datetime import datetime
import logging
from airflow.decorators import dag, task
os.chdir(os.environ['AIRFLOW_HOME'])
@dag(schedule_interval=None, start_date=datetime(2022, 2, 15), catchup=False, tags=['nyctaxi'])
def nyctaxi():
logging.basicConfig(level=logging.INFO)
@task()
def initialize():
logging.info('initializing...')
# on the first run, it checks for the existence of config.csv file with future trip data ranges
if os.path.isfile('./projects/nyctaxi/config/config.csv'):
# if it finds the file, it knows it is not the first run
first_run = False
with open('./projects/nyctaxi/config/config.csv', 'r') as configfile:
# config.csv logs the taxis, years and months of the missing ranges
reader = csv.reader(configfile)
taxis = set()
years = set()
months = set()
for row in reader:
taxis.add(row[0])
years.add(int(row[1]))
months.add(int((row[2])))
taxis = list(taxis)
years = sorted(years)  # sort so the min/max lookups below are correct (sets are unordered)
months = sorted(months)
# checks to see if config.csv is empty
if years:
year_min = years[0]
year_max = years[-1] + 1
month_min = months[0]
month_max = months[-1] + 1
else:
# if config.csv is empty, all downloads are complete
year_min = year_max = month_min = month_max = 0
logging.info('all data is downloaded')
else:
# if config.csv is not available, it initializes with the default configuration
first_run = True
taxis = ['yellow', 'green']
year_min = 2021
year_max = 2022
month_min = 1
month_max = 13
# last, it gets rid of the .gitkeep files to prevent any collision
folders = ['./projects/nyctaxi/tripdata/csv/renamed/', './projects/nyctaxi/tripdata/pq/']
for folder in folders:
with os.scandir(folder) as entries:
for entry in entries:
if entry.name == '.gitkeep':
os.remove(entry.path)
return dict(taxis=taxis, year_min=year_min, year_max=year_max, month_min=month_min, month_max=month_max,
first_run=first_run)
@task()
def extract(init_dict: dict):
if init_dict['taxis']:
logging.info('getting csv...')
csv_available = False
# it sets csv_available to False, because at this point it doesn't know if it will find a file
with open('./projects/nyctaxi/config/config.csv', 'w') as configfile:
configfile_writer = csv.writer(configfile)
for taxi in init_dict['taxis']:
for year in range(init_dict['year_min'], init_dict['year_max']):
for month in range(init_dict['month_min'], init_dict['month_max']):
month_str = str(month).zfill(2)
url = 'https://nyc-tlc.s3.amazonaws.com/trip+data/' + taxi + '_tripdata_' + str(
year) + '-' + month_str + '.csv'
r = requests.get(url, allow_redirects=True)
if r.status_code == requests.codes.ok:
# if it finds a file, it sets csv_available to true, i.e. data is available
csv_available = True
filename = url.split('/')[-1]
logging.info('processing ' + filename)
open('./projects/nyctaxi/tripdata/csv/' + filename, 'wb').write(r.content)
# it renames the pickup datetime column to normalize across
# yellow and green taxi tripdata
# to be able select and run stats on this column for both data sets
with open('./projects/nyctaxi/tripdata/csv/' + filename, 'r') as inFile, \
open('./projects/nyctaxi/tripdata/csv/renamed/' + filename, 'w') as outfile:
r = csv.reader(inFile)
w = csv.writer(outfile)
header = list()
for i, row in enumerate(r):
if i == 0:
for col in row:
if col == 'tpep_pickup_datetime' or col == 'lpep_pickup_datetime':
header.append('pickup_datetime')
else:
header.append(col)
w.writerow(header)
else:
w.writerow(row)
os.remove('./projects/nyctaxi/tripdata/csv/' + filename)
else:
# if it can't find a file, it logs the taxi, year and month
# to look for it on the next run
configfile_writer.writerow([taxi, year, month])
logging.info(
'file not available: ' + taxi + '_tripdata_' + str(year) + '-' + str(month).zfill(
2) + '.csv. I will look for it on the next run.')
return csv_available
else:
return False
@task()
def transform(csv_available: bool):
if csv_available:
# transforms csv to parquet
with os.scandir('./projects/nyctaxi/tripdata/csv/renamed/') as files:
for file in files:
logging.info('loading to parquet ' + file.name)
df = | pd.read_csv('./projects/nyctaxi/tripdata/csv/renamed/' + file.name, low_memory=False) | pandas.read_csv |
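The transform() task above is truncated right after reading each renamed CSV; the CSV-to-Parquet step it describes might look roughly like this sketch (same folder layout assumed, not the exact original code).

import os
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq

src = './projects/nyctaxi/tripdata/csv/renamed/'
dst = './projects/nyctaxi/tripdata/pq/'
for name in os.listdir(src):
    if not name.endswith('.csv'):
        continue
    frame = pd.read_csv(os.path.join(src, name), low_memory=False)
    table = pa.Table.from_pandas(frame, preserve_index=False)
    pq.write_table(table, os.path.join(dst, name.replace('.csv', '.parquet')))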
'''
Recommend musical artists part II
Suppose you were a big fan of <NAME> - which other musical artists might you like? Use your NMF features from the previous exercise and the cosine similarity to find similar musical artists. A solution to the previous exercise has been run, so norm_features is an array containing the normalized NMF features as rows. The names of the musical artists are available as the list artist_names.
INSTRUCTIONS
100XP
Import pandas as pd.
Create a DataFrame df from norm_features, using artist_names as an index.
Use the .loc[] accessor of df to select the row of '<NAME>'. Assign the result to artist.
Apply the .dot() method of df to artist to calculate the dot product of every row with artist. Save the result as similarities.
Print the result of the .nlargest() method of similarities to display the artists most similar to '<NAME>'.
'''
# Import pandas
import pandas as pd
# Create a DataFrame: df
df = | pd.DataFrame(norm_features, index=artist_names) | pandas.DataFrame |
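A hedged completion of the remaining exercise steps; 'Some Artist' is only a placeholder for the anonymized artist name, and df holds the unit-normalized NMF features built above.

artist = df.loc['Some Artist']      # row of normalized NMF features for one artist
similarities = df.dot(artist)       # cosine similarities, since rows are unit-normalized
print(similarities.nlargest())      # the most similar artists (default: top 5)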
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
import os
os.environ['OMP_NUM_THREADS'] = '4'
import gc
import time
os.environ["CUDA_VISIBLE_DEVICES"] = '3'
import setproctitle
setproctitle.setproctitle('Kaggle@shihongzhi')
t0 = time.time()
path = '/data/mas/shihongzhi/Kaggle/'
dtypes = {
'ip' : 'uint32',
'app' : 'uint16',
'device' : 'uint16',
'os' : 'uint16',
'channel' : 'uint16',
'is_attributed' : 'uint8',
'click_id' : 'uint32'
}
print('load train....')
is_sample = 0
if(is_sample):
train_file = "train_sample.csv"; test_file = "test_sample.csv"
else:
train_file = "train.csv"; test_file = "test.csv"
train_df = pd.read_csv(path+train_file, dtype=dtypes, skiprows = range(1, 131886954), usecols=['ip','app','device','os', 'channel', 'click_time', 'is_attributed'], parse_dates=['click_time'])
print('load test....')
test_df = pd.read_csv(path+test_file, dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'click_id'], parse_dates=['click_time'])
len_train = len(train_df)
train_df=train_df.append(test_df)
del test_df; gc.collect()
print('click time....')
train_df['click_time'] = (train_df['click_time'].astype(np.int64) // 10 ** 9).astype(np.int32)
train_df['next_click'] = (train_df.groupby(['ip', 'app', 'device', 'os']).click_time.shift(-1) - train_df.click_time).astype(np.float32)
train_df['next_click'].fillna((train_df['next_click'].mean()), inplace=True)
print('hour, day, wday....')
train_df['hour'] = | pd.to_datetime(train_df.click_time) | pandas.to_datetime |
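The next_click feature above (seconds until the same ip/app/device/os combination clicks again) is built with a grouped shift(-1); a toy frame makes the pattern explicit.

import numpy as np
import pandas as pd

toy = pd.DataFrame({
    'ip':         [1, 1, 1, 2],
    'app':        [9, 9, 9, 9],
    'device':     [0, 0, 0, 0],
    'os':         [7, 7, 7, 7],
    'click_time': [100, 160, 400, 250],   # epoch seconds, as after the conversion above
})
toy['next_click'] = (
    toy.groupby(['ip', 'app', 'device', 'os']).click_time.shift(-1) - toy.click_time
).astype(np.float32)
toy['next_click'].fillna(toy['next_click'].mean(), inplace=True)
# group (1,9,0,7): gaps of 60s and 240s; the last click of each group gets the mean (150.0)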
from sqlalchemy import create_engine
import pandas as pd
import datetime
import config
import pmdarima as pm
import numpy as np
import arch
import statistics
import traceback
pd.set_option('display.max_columns', None)
def initializer(symbol):
# Get Data
engine = create_engine(config.psql)
num_data_points = 255
one_year_ago = (datetime.datetime.utcnow().date() - datetime.timedelta(days=num_data_points * 1.45)).strftime("%Y-%m-%d")
query = f"select distinct * from stockdata_hist where symbol = '{symbol}' and tdate > '{one_year_ago}' AND (CAST(tdate AS TIME) = '20:00') limit {num_data_points}"
df = pd.read_sql_query(query, con=engine).sort_values(by='tdate', ascending=True)
# Get Forecast Range
steps = 5
today = df['tdate'].iloc[-1]
end_prediction_date = today + datetime.timedelta(days=steps)
end_friday = end_prediction_date + datetime.timedelta((4-end_prediction_date.weekday()) % 7)
tomorrow = today+datetime.timedelta(days=1)
date_range = pd.date_range(tomorrow, end_friday, freq="B")
period = len(pd.date_range(tomorrow, end_friday, freq="B"))
return df, tomorrow, date_range, period, engine
def arima(symbol, df, period, date_range):
df['tdate'] = pd.to_datetime(df['tdate'])
df.set_index(df['tdate'], inplace=True)
y = df['tick_close']
# Model ARIMA parameters
# model = pm.auto_arima(y, error_action='ignore', trace=True,
# suppress_warnings=True, maxiter=10,
# seasonal=True, m=50)
# print(type(model))
# print("get params:")
# print(model.get_params()['order'])
# print(type(model.get_params()['order']))
# print(model.summary())
m = 7
order = (1, 1, 1)
sorder = (0, 0, 1, m)
model = pm.arima.ARIMA(order, seasonal_order=sorder,
start_params=None, method='lbfgs', maxiter=50,
suppress_warnings=True, out_of_sample_size=0, scoring='mse',
scoring_args=None, trend=None, with_intercept=True)
model.fit(y)
# Forecast
forecasts = model.predict(n_periods=period, return_conf_int=True) # predict N steps into the future
flatten = forecasts[1].tolist()
results_df = pd.DataFrame(flatten, columns=['arima_low', 'arima_high'])
results_df['arima_forecast'] = forecasts[0]
results_df['tdate'] = date_range
results_df['uticker'] = symbol
results_df['arima_order'] = f"{order} {sorder}"
results_df['last_price'] = df['tick_close'][-1]
results_df['last_vwap'] = df['vwap'][-1]
results_df['arima_diff'] = (results_df['arima_forecast']-results_df['last_price'])/results_df['last_price']
results_df = results_df[['uticker', 'tdate', 'arima_low', 'arima_forecast', 'arima_high', 'arima_order', 'last_price', 'last_vwap', 'arima_diff']]
return results_df
def garch_model(df, period, date_range):
df = df.sort_index(ascending=True)
df['tdate'] = pd.to_datetime(df['tdate'])
df.set_index(df['tdate'], inplace=True)
market = df['tick_close']
returns = market.pct_change().dropna()
garch = arch.arch_model(returns, vol="GARCH", p=1, q=1, dist="normal")
fit_model = garch.fit(update_freq=1)
forecasts = fit_model.forecast(horizon=period, method='analytic', reindex=False)
f_mean = forecasts.mean.iloc[0:].iloc[0].reset_index().iloc[:, 1]
f_vol = np.sqrt(forecasts.variance.iloc[0:]).iloc[0].reset_index().iloc[:, 1]
f_res = np.sqrt(forecasts.residual_variance.iloc[0:]).iloc[0].reset_index().iloc[:, 1]
h_vol = statistics.stdev(returns.iloc[::-1])
h_mean = statistics.mean(returns.iloc[::-1])
temp_df = | pd.DataFrame() | pandas.DataFrame |
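A worked example of the horizon logic in initializer(): the forecast window runs from the day after the last close to the Friday on or after (last close + steps days), keeping business days only. The date below is made up for illustration.

import datetime
import pandas as pd

today = datetime.date(2023, 3, 14)                          # stands in for df['tdate'].iloc[-1]
steps = 5
end_prediction_date = today + datetime.timedelta(days=steps)              # Sunday 2023-03-19
end_friday = end_prediction_date + datetime.timedelta(
    (4 - end_prediction_date.weekday()) % 7)                              # Friday 2023-03-24
tomorrow = today + datetime.timedelta(days=1)
date_range = pd.date_range(tomorrow, end_friday, freq="B")
print(len(date_range))                                      # 8 business days to forecast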
import numpy as np
import pandas as pd
from functools import reduce
#Let's import some data from csv files to pandas dataframes!
cgm = pd.read_csv('./data/cgm_aligned.csv')
hr = pd.read_csv('./data/hr_aligned.csv')
meal = | pd.read_csv('./data/meal_aligned.csv') | pandas.read_csv |
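The functools.reduce import above suggests the three aligned frames are later merged on a shared time column; that common pattern is sketched here with toy frames, since the real column names are not shown in this row.

from functools import reduce
import pandas as pd

cgm_toy  = pd.DataFrame({'time': [0, 5, 10], 'glucose': [90, 95, 100]})
hr_toy   = pd.DataFrame({'time': [0, 5, 10], 'heart_rate': [60, 62, 61]})
meal_toy = pd.DataFrame({'time': [0, 5, 10], 'carbs': [0, 30, 0]})
merged = reduce(lambda left, right: pd.merge(left, right, on='time', how='inner'),
                [cgm_toy, hr_toy, meal_toy])
print(merged)   # one row per shared timestamp with glucose, heart_rate and carbs columns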
#!/usr/bin/env python3
from datetime import datetime
from geoedfframework.GeoEDFPlugin import GeoEDFPlugin
from geoedfframework.utils.GeoEDFError import GeoEDFError
from .helper import GeomHelper, ColorHelper
"""Module to create map of a stream reach based on a specified NWIS (USGS) site and range (up/down stream),
including various feature layers derived from USGS NLDI navigation along the stream,
its tributaries and associated drainage basins, while also extracting data for further visualization.
Input parameters include:
nwis_site ['USGS-03206000']
um_dist [50]
dm_dist [25]
begin_date = [1/1/xx, where xx is 3 years past]
end_date = [today]
ignore_wqp_dates [True, all dates]
"""
class WQPMap(GeoEDFPlugin):
# Required inputs:
# nwis_site: NWIS (USGS) Station
# Optional inputs:
# um_dist: Upstream distance (km) along main stream from NWIS Station to traverse [50]
# dm_dist: Downstream distance (km) to traverse [25]
# begin_date: Beginning date (mm/dd/YYYY) to query properties [1/1/CurrentYear-3]
# end_data: End date to query properties [CurrentDay]
# ignore_wqp_dates: Option (True/False) to ignore above date for WQP properties [True]
__required_params = ['nwis_site']
__optional_params = ['um_dist','dm_dist','begin_date','end_date','ignore_wqp_dates']
# we use just kwargs since this makes it easier to instantiate the object from the
# GeoEDFProcessor class
def __init__(self, **kwargs):
#list to hold all parameter names
self.provided_params = self.__required_params + self.__optional_params
# check that all required params have been provided
for param in self.__required_params:
if param not in kwargs:
raise GeoEDFError('Required parameter %s for WQPMap not provided' % param)
# set all required parameters
for key in self.__required_params:
setattr(self,key,kwargs.get(key))
# set optional parameters
for key in self.__optional_params:
if key == 'um_dist':
val = kwargs.get(key,50)
setattr(self,key,val)
continue
if key == 'dm_dist':
val = kwargs.get(key,25)
setattr(self,key,val)
continue
if key == 'begin_date':
val = kwargs.get(key,None)
if val is not None:
val = datetime.strptime(val,"%m/%d/%Y")
else:
val = kwargs.get(key,datetime(datetime.now().year-3, 1, 1))
setattr(self,key,val)
continue
if key == 'end_date':
val = kwargs.get(key,None)
if val is not None:
val = datetime.strptime(val,"%m/%d/%Y")
else:
val = datetime.now()
setattr(self,key,val)
continue
if key == 'ignore_wqp_dates':
val = kwargs.get(key,True)
setattr(self,key,val)
continue
#if key not provided in optional arguments, defaults value to None
setattr(self,key,kwargs.get(key,None))
# super class init
super().__init__()
# The process method that generates the map
def process(self):
# Get things set up
from os import path
# Enable numerical arrays and matrices with NumPy
import numpy as np
# Enable R-style DataFrames with Pandas
import pandas as pd
# Enable Math functions
import math
# Enable working with Dates and Times
from datetime import datetime
# Enable geospatial DataFrames with GeoPandas (built on Fiona, which is built on GDAL/OGR)
import geopandas as gpd
# Enable other geospatial functions using Shapely
from shapely.geometry import Point, Polygon
# Enable Leatlet.JS-based mapping with Folium
import folium
from folium import IFrame
import folium.plugins as plugins
# Enable HTTP requests and parsing of JSON results
import requests
import json
# Set parameters
NWIS_SITE = self.nwis_site
UM_DIST = self.um_dist
DM_DIST = self.dm_dist
BEGIN_DATE = "{0:02d}-{1:02d}-{2:4d}".format(self.begin_date.month,self.begin_date.day,self.begin_date.year)
END_DATE = "{0:02d}-{1:02d}-{2:4d}".format(self.end_date.month,self.end_date.day,self.end_date.year)
IGNORE_WQP_DATES = self.ignore_wqp_dates
# URLs for REST Web Services
USGS_NLDI_WS = "https://labs.waterdata.usgs.gov/api/nldi/linked-data" # USGS NLDI REST web services
NWIS_SITE_URL = USGS_NLDI_WS+"/nwissite/"+NWIS_SITE
NWIS_SITE_NAV = NWIS_SITE_URL+"/navigate"
TNM_WS = "https://hydro.nationalmap.gov/arcgis/rest/services" # The National Map REST web services
ARCGIS_WS = "http://server.arcgisonline.com/arcgis/rest/services" # ARCGIS Online REST web services
# Set Output Directory
if (self.target_path == None):
OUT_DIR = "."
else:
OUT_DIR = self.target_path
try:
# Get Lat/Lon coordinates of starting site (NWIS station)
nwis_site_json = gpd.read_file(NWIS_SITE_URL)
nwis_site_geom = nwis_site_json.iloc[0]['geometry']
nwis_site_coord = [nwis_site_geom.y, nwis_site_geom.x]
# Generate map
river_map = folium.Map(nwis_site_coord,zoom_start=10,tiles=None)
plugins.ScrollZoomToggler().add_to(river_map);
plugins.Fullscreen(
position='bottomright',
title='Full Screen',
title_cancel='Exit Full Screen',
force_separate_button=True
).add_to(river_map);
# Add sites within reach using NLDI web services at USGS
# Popup parameters
width = 500
height = 120
max_width = 1000
# Main Stream
folium.GeoJson(NWIS_SITE_NAV+"/UM?distance="+str(UM_DIST),name="Main Stream (up)",show=True,control=False).add_to(river_map);
folium.GeoJson(NWIS_SITE_NAV+"/DM?distance="+str(DM_DIST),name="Main Stream (down)",show=True,control=False).add_to(river_map);
# NWIS Sites
fg_nwis = folium.FeatureGroup(name="USGS (NWIS) Sites",overlay=True,show=False)
color = 'darkred'
icon = 'dashboard'
nwis_sites_dm = gpd.read_file(NWIS_SITE_NAV+"/DM/nwissite?distance="+str(DM_DIST))
nwis_sites_um = gpd.read_file(NWIS_SITE_NAV+"/UM/nwissite?distance="+str(UM_DIST))
nwis_sites = gpd.GeoDataFrame(pd.concat([nwis_sites_dm,nwis_sites_um], ignore_index=True), crs=nwis_sites_dm.crs) # TODO: eliminate duplicate for anchor site
for i, nwis_site in nwis_sites.iterrows():
coord = [nwis_site.geometry.y,nwis_site.geometry.x]
label = 'NWIS Station: '+nwis_site.identifier
html = label
html += '<br>{0:s}'.format(nwis_site['name'])
html += '<br><a href=\"{0:s}\" target=\"_blank\">{1:s}</a>'.format(nwis_site.uri+'/#parameterCode=00065&startDT='+BEGIN_DATE+'&endDT='+END_DATE,nwis_site.uri)
html += '<br>Lat: {0:.4f}, Lon: {1:.4f}'.format(nwis_site.geometry.y,nwis_site.geometry.x)
html += '<br>Comid: {0:s}'.format(nwis_site.comid)
iframe = folium.IFrame(html,width=width,height=height)
popup = folium.Popup(iframe,max_width=max_width)
fg_nwis.add_child(folium.Marker(location=coord,icon=folium.Icon(color=color,icon=icon),popup=popup,tooltip=label));
fg_nwis.add_to(river_map)
# WQP Stations
fg_wqp = folium.FeatureGroup(name="WQP Stations",overlay=True,show=False)
color = 'darkgreen'
radius = 3
wqp_sites_dm = gpd.read_file(NWIS_SITE_NAV+"/DM/wqp?distance="+str(DM_DIST))
wqp_sites_um = gpd.read_file(NWIS_SITE_NAV+"/UM/wqp?distance="+str(UM_DIST))
wqp_sites = gpd.GeoDataFrame(pd.concat([wqp_sites_dm,wqp_sites_um], ignore_index=True), crs=wqp_sites_dm.crs)
for i, wqp_site in wqp_sites.iterrows():
coord = [wqp_site.geometry.y,wqp_site.geometry.x]
label = 'WQP Station: '+wqp_site.identifier
html = label
html += '<br>{0:s}'.format(wqp_site['name'])
html += '<br><a href=\"{0:s}\" target=\"_blank\">{1:s}</a>'.format(wqp_site.uri,wqp_site.uri)
html += '<br>Lat: {0:.4f}, Lon: {1:.4f}'.format(wqp_site.geometry.y,wqp_site.geometry.x)
html += '<br>Comid: {0:s}'.format(wqp_site.comid)
iframe = folium.IFrame(html,width=width,height=height)
popup = folium.Popup(iframe,max_width=max_width)
fg_wqp.add_child(folium.CircleMarker(location=coord,radius=radius,color=color,popup=popup,tooltip=label));
fg_wqp.add_to(river_map);
# Add HUC12 Pour Points, *differential* drainage basins, HUC4-10 boundaries associated with each
fg_huc12pp = folium.FeatureGroup(name="HUC12 Pour Points",overlay=True,show=False)
fg_basins = folium.FeatureGroup(name="Drainage Basins",overlay=True,show=False)
fg_wbd4 = folium.FeatureGroup(name="HUC4 Boundaries",overlay=True,show=False)
fg_wbd6 = folium.FeatureGroup(name="HUC6 Boundaries",overlay=True,show=False)
fg_wbd8 = folium.FeatureGroup(name="HUC8 Boundaries",overlay=True,show=False)
fg_wbd10 = folium.FeatureGroup(name="HUC10 Boundaries",overlay=True,show=False)
fg_wbd12 = folium.FeatureGroup(name="HUC12 Boundaries",overlay=True,show=False)
huc4_list = []
huc6_list = []
huc8_list = []
huc10_list = []
huc12_list = []
color = 'darkblue'
radius = 3
try:
huc12pp_sites_dm = gpd.read_file(NWIS_SITE_NAV+"/DM/huc12pp?distance="+str(DM_DIST),driver='GeoJSON')
except Exception as ex:
print("An exception of type {0} occurred. Arguments:\n{1!r}".format(type(ex).__name__, ex.args))
huc12pp_sites_dm = gpd.GeoDataFrame()
try:
huc12pp_sites_um = gpd.read_file(NWIS_SITE_NAV+"/UM/huc12pp?distance="+str(UM_DIST),driver='GeoJSON')
except Exception as ex:
print("An exception of type {0} occurred. Arguments:\n{1!r}".format(type(ex).__name__, ex.args))
huc12pp_sites_um = gpd.GeoDataFrame()
huc12pp_sites = gpd.GeoDataFrame(pd.concat([huc12pp_sites_dm,huc12pp_sites_um], ignore_index=True), crs=huc12pp_sites_dm.crs)
n_segs = len(huc12pp_sites)-1
# Sort sites by decreasing area of drainage basin
def get_area(x):
x_basin = gpd.read_file(USGS_NLDI_WS+"/comid/"+x+"/basin")
return int(round(x_basin.iloc[0].geometry.area,3)*1000)
huc12pp_sites['area']=huc12pp_sites.apply(lambda x: get_area(x.comid), axis=1)
huc12pp_sites.set_index(['area'],inplace=True,drop=True)
huc12pp_sites.sort_index(inplace=True,ascending=False)
i = 0
for area, huc12pp_site in huc12pp_sites.iterrows():
# Add to HUC12 PP to Site table in database
# if (DB_NOT_FOUND):
# huc12pp = Site(type='HUC12PP',name=huc12pp_site.identifier,desc=huc12pp_site['name'],url=huc12pp_site.uri,comid=huc12pp_site.comid,geom='POINT({0:.4f} {1:.4f})'.format(huc12pp_site.geometry.x,huc12pp_site.geometry.y))
# spatialite_session.add(huc12pp)
# Get HUC12 PP drainage basin
basin_url = USGS_NLDI_WS+"/comid/{0:s}/basin".format(huc12pp_site.comid)
try:
basin = gpd.read_file(basin_url,driver='GeoJSON')
except Exception as ex:
print("An exception of type {0} occurred. Arguments:\n{1!r}".format(type(ex).__name__, ex.args))
i = i + 1
continue
basin_area = GeomHelper.geom_area(basin)
basin_diff_area = basin_area
# Get HUC12 watershed boundary (WBD)
wbd12_url = TNM_WS+"/wbd/MapServer/6/query?where=HUC12%3D%27{0:s}%27&outFields=NAME%2CHUC12%2CAREASQKM&f=geojson".format(huc12pp_site.identifier)
try:
wbd12 = gpd.read_file(wbd12_url,driver='GeoJSON')
except Exception as ex:
print("An exception of type {0} occurred. Arguments:\n{1!r}".format(type(ex).__name__, ex.args))
i = i + 1
continue
if i < n_segs:
# Add HUC12 boundary a feature layer
style_function = lambda x: {'color': 'darkgreen', 'weight': 1, 'fillColor': 'green', 'fillOpacity': 0.1}
highlight_function = lambda x: {'color':'yellow', 'weight':2}
tooltip = "HUC12: {0:s} ({1:s}), Area: {2:.2f}".format(wbd12.iloc[0]['huc12'],wbd12.iloc[0]['name'],GeomHelper.geom_area(wbd12)[0])
wbd12_feature = folium.GeoJson(wbd12.iloc[0].geometry,style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_wbd12.add_child(wbd12_feature);
huc12_list.append(huc12pp_site.identifier)
if i > 0:
# Generate and show difference between sussessive drainage basins -- this is for the previous (downstream) basin, associated with previous HUC12 PP
basin_diff = gpd.overlay(basin_prev,basin,how='difference')
basin_diff_area = GeomHelper.geom_area(basin_diff)
style_function = lambda x: {'color': 'red', 'weight': 1, 'fillColor': 'blue', 'fillOpacity': 0.1}
highlight_function = lambda x: {'color':'yellow', 'weight':3}
tooltip = "Differential Drainage Basin for HUC12 Pour Point: {0:s} ({1:s}), Area: {2:.2f}".format(wbd12.iloc[0]['huc12'],wbd12.iloc[0]['name'],basin_diff_area[0])
basin_diff_feature = folium.GeoJson(basin_diff.iloc[0].geometry.buffer(-0.001).buffer(0.001),style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_basins.add_child(basin_diff_feature);
if i == n_segs:
# Show large basin of first (highest upstream) pour point
style_function = lambda x: {'color': 'gray', 'weight': 1, 'fillColor': 'gray', 'fillOpacity': 0.1}
highlight_function = lambda x: {'color':'yellow', 'weight':1}
tooltip = "Total Drainage Basin for HUC12 Pour Point: {0:s} ({1:s}), Area: {2:.2f}".format(wbd12.iloc[0]['huc12'],wbd12.iloc[0]['name'],basin_area[0])
basin_feature = folium.GeoJson(basin.iloc[0].geometry,style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_basins.add_child(basin_feature);
# Get HUC10 containing HUC12 and add that to another feature layer
huc10_identifier = huc12pp_prev[:-2]
wbd10_url = TNM_WS+"/wbd/MapServer/5/query?where=HUC10%3D%27{0:s}%27&outFields=NAME%2CHUC10%2CAREASQKM&f=geojson".format(huc10_identifier)
try:
wbd10 = gpd.read_file(wbd10_url)
except:
pass
else:
basin_huc10_overlap = gpd.overlay(wbd10,basin_diff,how='intersection')
style_function = lambda x: {'color': 'darkgreen', 'weight': 1, 'fillColor': 'green', 'fillOpacity': 0.05}
highlight_function = lambda x: {'color':'yellow', 'weight':2}
tooltip = "HUC10 Overlap: {0:s} ({1:s}), Area: {2:.2f}".format(wbd10.iloc[0]['huc10'],wbd10.iloc[0]['name'],GeomHelper.geom_area(basin_huc10_overlap)[0])
wbd10_feature = folium.GeoJson(basin_huc10_overlap.iloc[0].geometry.buffer(-0.001).buffer(0.001),style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_wbd10.add_child(wbd10_feature);
if (huc10_identifier not in huc10_list):
tooltip = "HUC10: {0:s} ({1:s}), Area: {2:.2f}".format(wbd10.iloc[0]['huc10'],wbd10.iloc[0]['name'],wbd10.iloc[0].areasqkm)
wbd10_feature = folium.GeoJson(wbd10.iloc[0].geometry.buffer(-0.001).buffer(0.001),style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_wbd10.add_child(wbd10_feature);
huc10_list.append(huc10_identifier)
# Get HUC8 containing HUC12 and add that to another feature layer
huc8_identifier = huc12pp_prev[:-4]
wbd8_url = TNM_WS+"/wbd/MapServer/4/query?where=HUC8%3D%27{0:s}%27&outFields=NAME%2CHUC8%2CAREASQKM&f=geojson".format(huc8_identifier)
try:
wbd8 = gpd.read_file(wbd8_url)
except:
pass
else:
basin_huc8_overlap = gpd.overlay(wbd8,basin_diff,how='intersection')
style_function = lambda x: {'color': 'darkgreen', 'weight': 1, 'fillColor': 'green', 'fillOpacity': 0.05}
highlight_function = lambda x: {'color':'yellow', 'weight':2}
tooltip = "HUC8 Overlap: {0:s} ({1:s}), Area: {2:.2f}".format(wbd8.iloc[0]['huc8'],wbd8.iloc[0]['name'],GeomHelper.geom_area(basin_huc8_overlap)[0])
wbd8_feature = folium.GeoJson(basin_huc8_overlap.iloc[0].geometry.buffer(-0.001).buffer(0.001),style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_wbd8.add_child(wbd8_feature);
if (huc8_identifier not in huc8_list):
tooltip = "HUC8: {0:s} ({1:s}), Area: {2:.2f}".format(wbd8.iloc[0]['huc8'],wbd8.iloc[0]['name'],wbd8.iloc[0].areasqkm)
wbd8_feature = folium.GeoJson(wbd8.iloc[0].geometry.buffer(-0.001).buffer(0.001),style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_wbd8.add_child(wbd8_feature);
huc8_list.append(huc8_identifier)
# Get HUC6 containing HUC12 and add that to another feature layer
huc6_identifier = huc12pp_prev[:-6]
wbd6_url = TNM_WS+"/wbd/MapServer/3/query?where=HUC6%3D%27{0:s}%27&outFields=NAME%2CHUC6%2CAREASQKM&f=geojson".format(huc6_identifier)
try:
wbd6 = gpd.read_file(wbd6_url)
except:
pass
else:
basin_huc6_overlap = gpd.overlay(wbd6,basin_diff,how='intersection')
style_function = lambda x: {'color': 'darkgreen', 'weight': 1, 'fillColor': 'green', 'fillOpacity': 0.05}
highlight_function = lambda x: {'color':'yellow', 'weight':2}
tooltip = "HUC6 Overlap: {0:s} ({1:s}), Area: {2:.2f}".format(wbd6.iloc[0]['huc6'],wbd6.iloc[0]['name'],GeomHelper.geom_area(basin_huc6_overlap)[0])
wbd6_feature = folium.GeoJson(basin_huc6_overlap.iloc[0].geometry.buffer(-0.001).buffer(0.001),style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_wbd6.add_child(wbd6_feature);
if (huc6_identifier not in huc6_list):
tooltip = "HUC6: {0:s} ({1:s}), Area: {2:.2f}".format(wbd6.iloc[0]['huc6'],wbd6.iloc[0]['name'],wbd6.iloc[0].areasqkm)
wbd6_feature = folium.GeoJson(wbd6.iloc[0].geometry.buffer(-0.001).buffer(0.001),style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_wbd6.add_child(wbd6_feature);
huc6_list.append(huc6_identifier)
# Get HUC4 containing HUC12 and add that to another feature layer
huc4_identifier = huc12pp_prev[:-8]
wbd4_url = TNM_WS+"/wbd/MapServer/2/query?where=HUC4%3D%27{0:s}%27&outFields=NAME%2CHUC4%2CAREASQKM&f=geojson".format(huc4_identifier)
try:
wbd4 = gpd.read_file(wbd4_url)
except:
pass
else:
basin_huc4_overlap = gpd.overlay(wbd4,basin_diff,how='intersection')
style_function = lambda x: {'color': 'darkgreen', 'weight': 1, 'fillColor': 'green', 'fillOpacity': 0.05}
highlight_function = lambda x: {'color':'yellow', 'weight':2}
tooltip = "HUC4 Overlap: {0:s} ({1:s}), Area: {2:.2f}".format(wbd4.iloc[0]['huc4'],wbd4.iloc[0]['name'],GeomHelper.geom_area(basin_huc4_overlap)[0])
wbd4_feature = folium.GeoJson(basin_huc4_overlap.iloc[0].geometry.buffer(-0.001).buffer(0.001),style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_wbd4.add_child(wbd4_feature);
if (huc4_identifier not in huc4_list):
tooltip = "HUC4: {0:s} ({1:s}), Area: {2:.2f}".format(wbd4.iloc[0]['huc4'],wbd4.iloc[0]['name'],wbd4.iloc[0].areasqkm)
wbd4_feature = folium.GeoJson(wbd4.iloc[0].geometry.buffer(-0.001).buffer(0.001),style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_wbd4.add_child(wbd4_feature);
huc4_list.append(huc4_identifier)
basin_prev = basin
huc12pp_prev = huc12pp_site.identifier
# HUC12 Pour Point markers
coord = [huc12pp_site.geometry.y,huc12pp_site.geometry.x]
label = 'Pour Point for HUC12: '+wbd12.iloc[0]['name']
html = label
html += '<br>Identifier: {0:s}'.format(huc12pp_site.identifier)
html += '<br>Lat: {0:.2f}, Lon: {1:.2f}'.format(huc12pp_site.geometry.y,huc12pp_site.geometry.x)
html += '<br>Comid: {0:s}'.format(huc12pp_site.comid)
html += '<br>Area Total: {0:.2f}'.format(basin_area[0])
html += '<br>Area Difference: {0:.2f}'.format(basin_diff_area[0])
iframe = folium.IFrame(html,width=width,height=height)
popup = folium.Popup(iframe,max_width=max_width)
fg_huc12pp.add_child(folium.CircleMarker(location=coord,radius=radius,color=color,popup=popup,tooltip=label));
i = i + 1
basin_prev = basin
huc12pp_prev = huc12pp_site.identifier
# Do HUC2-10s for final upstream basin
huc10_identifier = huc12pp_prev[:-2]
wbd10_url = TNM_WS+"/wbd/MapServer/5/query?where=HUC10%3D%27{0:s}%27&outFields=NAME%2CHUC10%2CAREASQKM&f=geojson".format(huc10_identifier)
try:
wbd10 = gpd.read_file(wbd10_url)
except:
pass
else:
basin_huc10_overlap = gpd.overlay(wbd10,basin_diff,how='intersection')
style_function = lambda x: {'color': 'darkgreen', 'weight': 1, 'fillColor': 'green', 'fillOpacity': 0.05}
highlight_function = lambda x: {'color':'yellow', 'weight':2}
tooltip = "HUC10 Overlap: {0:s} ({1:s}), Area: {2:.2f}".format(wbd10.iloc[0]['huc10'],wbd10.iloc[0]['name'],GeomHelper.geom_area(basin_huc10_overlap)[0])
wbd10_feature = folium.GeoJson(basin_huc10_overlap.iloc[0].geometry.buffer(-0.001).buffer(0.001),style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_wbd10.add_child(wbd10_feature);
if (huc10_identifier not in huc10_list):
tooltip = "HUC10: {0:s} ({1:s}), Area: {2:.2f}".format(wbd10.iloc[0]['huc10'],wbd10.iloc[0]['name'],wbd10.iloc[0].areasqkm)
wbd10_feature = folium.GeoJson(wbd10.iloc[0].geometry.buffer(-0.001).buffer(0.001),style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_wbd10.add_child(wbd10_feature);
huc10_list.append(huc10_identifier)
huc8_identifier = huc12pp_prev[:-4]
wbd8_url = TNM_WS+"/wbd/MapServer/4/query?where=HUC8%3D%27{0:s}%27&outFields=NAME%2CHUC8%2CAREASQKM&f=geojson".format(huc8_identifier)
try:
wbd8 = gpd.read_file(wbd8_url)
except:
pass
else:
basin_huc8_overlap = gpd.overlay(wbd8,basin_diff,how='intersection')
style_function = lambda x: {'color': 'darkgreen', 'weight': 1, 'fillColor': 'green', 'fillOpacity': 0.05}
highlight_function = lambda x: {'color':'yellow', 'weight':2}
tooltip = "HUC8 Overlap: {0:s} ({1:s}), Area: {2:.2f}".format(wbd8.iloc[0]['huc8'],wbd8.iloc[0]['name'],GeomHelper.geom_area(basin_huc8_overlap)[0])
wbd8_feature = folium.GeoJson(basin_huc8_overlap.iloc[0].geometry.buffer(-0.001).buffer(0.001),style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_wbd8.add_child(wbd8_feature);
if (huc8_identifier not in huc8_list):
tooltip = "HUC8: {0:s} ({1:s}), Area: {2:.2f}".format(wbd8.iloc[0]['huc8'],wbd8.iloc[0]['name'],wbd8.iloc[0].areasqkm)
wbd8_feature = folium.GeoJson(wbd8.iloc[0].geometry.buffer(-0.001).buffer(0.001),style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_wbd8.add_child(wbd8_feature);
huc8_list.append(huc8_identifier)
huc6_identifier = huc12pp_prev[:-6]
wbd6_url = TNM_WS+"/wbd/MapServer/3/query?where=HUC6%3D%27{0:s}%27&outFields=NAME%2CHUC6%2CAREASQKM&f=geojson".format(huc6_identifier)
try:
wbd6 = gpd.read_file(wbd6_url)
except:
pass
else:
basin_huc6_overlap = gpd.overlay(wbd6,basin_diff,how='intersection')
style_function = lambda x: {'color': 'darkgreen', 'weight': 1, 'fillColor': 'green', 'fillOpacity': 0.05}
highlight_function = lambda x: {'color':'yellow', 'weight':2}
tooltip = "HUC6 Overlap: {0:s} ({1:s}), Area: {2:.2f}".format(wbd6.iloc[0]['huc6'],wbd6.iloc[0]['name'],GeomHelper.geom_area(basin_huc6_overlap)[0])
wbd6_feature = folium.GeoJson(basin_huc6_overlap.iloc[0].geometry.buffer(-0.001).buffer(0.001),style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_wbd6.add_child(wbd6_feature);
if (huc6_identifier not in huc6_list):
tooltip = "HUC6: {0:s} ({1:s}), Area: {2:.2f}".format(wbd6.iloc[0]['huc6'],wbd6.iloc[0]['name'],wbd6.iloc[0].areasqkm)
wbd6_feature = folium.GeoJson(wbd6.iloc[0].geometry.buffer(-0.001).buffer(0.001),style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_wbd6.add_child(wbd6_feature);
huc6_list.append(huc6_identifier)
huc4_identifier = huc12pp_prev[:-8]
wbd4_url = TNM_WS+"/wbd/MapServer/2/query?where=HUC4%3D%27{0:s}%27&outFields=NAME%2CHUC4%2CAREASQKM&f=geojson".format(huc4_identifier)
try:
wbd4 = gpd.read_file(wbd4_url)
except:
pass
else:
basin_huc4_overlap = gpd.overlay(wbd4,basin_diff,how='intersection')
style_function = lambda x: {'color': 'darkgreen', 'weight': 1, 'fillColor': 'green', 'fillOpacity': 0.05}
highlight_function = lambda x: {'color':'yellow', 'weight':2}
tooltip = "HUC4 Overlap: {0:s} ({1:s}), Area: {2:.2f}".format(wbd4.iloc[0]['huc4'],wbd4.iloc[0]['name'],GeomHelper.geom_area(basin_huc4_overlap)[0])
wbd4_feature = folium.GeoJson(basin_huc4_overlap.iloc[0].geometry.buffer(-0.001).buffer(0.001),style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_wbd4.add_child(wbd4_feature);
if (huc4_identifier not in huc4_list):
tooltip = "HUC4: {0:s} ({1:s}), Area: {2:.2f}".format(wbd4.iloc[0]['huc4'],wbd4.iloc[0]['name'],wbd4.iloc[0].areasqkm)
wbd4_feature = folium.GeoJson(wbd4.iloc[0].geometry.buffer(-0.001).buffer(0.001),style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_wbd4.add_child(wbd4_feature);
huc4_list.append(huc4_identifier)
fg_huc12pp.add_to(river_map);
fg_basins.add_to(river_map);
fg_wbd4.add_to(river_map);
fg_wbd6.add_to(river_map);
fg_wbd8.add_to(river_map);
fg_wbd10.add_to(river_map);
fg_wbd12.add_to(river_map);
# Add HUC12s that are contained in the drainage basins
# * TODO: Search by shape
fg_huc12_plus = folium.FeatureGroup(name="Other HUC12s in HUC10",overlay=True,show=False)
i = 0
n_segs = len(huc12pp_sites)
for area, huc12pp_site in huc12pp_sites.iterrows():
if i >= n_segs - 1:
break
basin_url = USGS_NLDI_WS+"/comid/{0:s}/basin".format(huc12pp_site.comid)
try:
basin = gpd.read_file(basin_url,driver='GeoJSON')
except:
i = i + 1
continue
# Get HUC12 watershed boundaries sharing the same HUC10
huc12_plus_url = TNM_WS+"/wbd/MapServer/6/query?where=HUC12%20LIKE%20%27{0:s}%25%27&outFields=NAME%2CHUC12%2CSHAPE_Length&f=geojson".format(huc12pp_site.identifier[:-2])
try:
huc12_plus = gpd.read_file(huc12_plus_url,driver='GeoJSON')
except:
i = i + 1
continue
huc12_basin_overlap = gpd.overlay(huc12_plus,basin,how='intersection')
if (not huc12_basin_overlap.empty):
for k, huc12_in_basin in huc12_basin_overlap.iterrows():
huc12_wbd_url = TNM_WS+"/wbd/MapServer/6/query?where=HUC12%3D%27{0:s}%27&outFields=NAME%2CHUC12%2CSHAPE_Length&f=geojson".format(huc12_in_basin.huc12)
huc12_wbd = gpd.read_file(huc12_wbd_url,driver='GeoJSON')
huc12_overlap = gpd.overlay(huc12_wbd,basin,how='intersection')
if ((not huc12_overlap.empty) and (huc12_overlap.iloc[0].geometry.area > 0.001) and (huc12_in_basin.huc12 not in huc12_list)):
huc12_list.append(huc12_in_basin.huc12)
# Add HUC12 WBD boundary
style_function = lambda x: {'color': 'darkgreen', 'weight': 1, 'fillColor': 'green', 'fillOpacity': 0.05}
highlight_function = lambda x: {'color':'yellow', 'weight':2}
tooltip = "HUC12: {0:s} ({1:s}), Area: {2:.2f}".format(huc12_wbd.iloc[0]['huc12'],huc12_wbd.iloc[0]['name'],GeomHelper.geom_area(huc12_wbd)[0])
huc12_plus_feature = folium.GeoJson(huc12_wbd.iloc[0].geometry,style_function=style_function,highlight_function=highlight_function,tooltip=tooltip)
fg_huc12_plus.add_child(huc12_plus_feature);
i = i + 1
fg_huc12_plus.add_to(river_map);
# Add HUC12s that are contained in nearby HUC10s (in same HUC8 as the HUC12 Pour Point) and in the associated drainage basin
# * TODO: How to exclude more distant HUC10s?
# Add tributaries upstream of HUC12 Pour Points
fg_utpp = folium.FeatureGroup(name="Tribs upstream of PPs",overlay=True,show=False)
for huc12 in huc12_list:
wbd12_url = TNM_WS+"/wbd/MapServer/6/query?where=HUC12%3D%27{0:s}%27&f=geojson".format(huc12)
try:
wbd12 = gpd.read_file(wbd12_url)
except:
continue
distance = int(round(GeomHelper.geom_diagonal(wbd12),0)) # May need to extend for winding streams
#distance = 35
tribs = folium.GeoJson(USGS_NLDI_WS+"/huc12pp/{0:s}/navigate/UT?distance={1:d}".format(huc12,distance))
fg_utpp.add_child(tribs);
fg_utpp.add_to(river_map);
# Add water quality (WQP) properties
wqp_property_series = []
for i, wqp_site in wqp_sites.iterrows():
if (IGNORE_WQP_DATES):
wqp_properties = pd.read_csv("https://www.waterqualitydata.us/data/Result/search?siteid="+wqp_site.identifier+"&mimeType=csv")
else:
wqp_properties = | pd.read_csv("https://www.waterqualitydata.us/data/Result/search?siteid="+wqp_site.identifier+"&startDateLo="+BEGIN_DATE+"&startDateHi="+END_DATE+"&mimeType=csv") | pandas.read_csv |
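The "differential drainage basin" step above subtracts each pour point's basin from the previous (larger) one with gpd.overlay(..., how='difference') and cleans slivers with a negative-then-positive buffer; a toy version with plain boxes shows the idea without any real basin geometry.

import geopandas as gpd
from shapely.geometry import box

basin_prev = gpd.GeoDataFrame(geometry=[box(0, 0, 4, 4)], crs="EPSG:4326")  # larger, downstream basin
basin      = gpd.GeoDataFrame(geometry=[box(0, 0, 2, 4)], crs="EPSG:4326")  # next basin upstream
basin_diff = gpd.overlay(basin_prev, basin, how='difference')               # incremental area only
cleaned = basin_diff.iloc[0].geometry.buffer(-0.001).buffer(0.001)          # drop tiny slivers
print(round(basin_diff.iloc[0].geometry.area, 2), round(cleaned.area, 2))   # both close to 8.0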
import numpy as np
import pandas as pd
from numpy.random import default_rng
#///////////////////// miscellaneous functions starts here
def cAngle(i):
x=i % 360
return x
def weib(x,A,k): #A is the scale and k is the shape factor
return (k / A) * (x / A)**(k - 1) * np.exp(-(x / A)**k) #Weibull probability density of a given wind speed.
def weib_cumulative(x,A,k): #A is the scale and k is the shape factor
return 1-np.exp(-1*(x/A)**k) #Cumulative Weibull probability of a given wind speed.
#///////////////////// miscellaneous functions ends here
class environment:
"""
Creates the stand-alone environment and returns it with the given unique ID. By default, wind speeds from 0 m/s to 30 m/s in 0.5 m/s increments and wind directions covering 360 degrees in 1 degree increments are added. A temperature of 25 degrees Celsius and a pressure of 101325 Pa are assumed. See example below:
:param uniqueID: [*req*] the given unique ID.
:Example:
>>> Env = environment("C_Env")
>>> #Creates an environment without assigning it to any wind farm.
>>> print(Env.info.keys())
dict_keys(['Wind directions', 'Sectors', 'Wind speeds', 'Pressure', 'Temperature', 'Wind probability', 'Scale parameter of wind distribution', 'Shape parameter of wind distribution'])
>>> print(Env.info['Wind directions']) #doctest:+ELLIPSIS
[0, 1, 2, 3, ...]
>>> print(Env.info['Wind speeds']) # doctest:+ELLIPSIS
[0.0, 0.5, 1.0, 1.5, 2.0, ...]
\\----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
created_environments=[]
def __init__(self, uniqueID):
if uniqueID in environment.created_environments: #Checks if the environment ID is already taken
raise Exception ("The environment unique ID [" + str(uniqueID) + "] is already taken.")
else:
if type(uniqueID) == str and len(uniqueID.split())==1:
if uniqueID in globals().keys(): #Checks if the given unique Id is not in conflict with user's already assigned variables.
raise Exception ("Another object with the same uniqe ID globally exists. New environment not created.")
else:
globals()[uniqueID] = self #environment is dynamically created and referenced with the unique ID to the user's assigned variable.
environment.created_environments.append(uniqueID) #we append the created environment to the list
self.uID=uniqueID
else:
raise Exception("Unique ID should be a string without spaces.")
self.__conditionsDic={}
self.__conditionsDic["Wind directions"]=[i for i in range(0,360)] #degrees
self.__conditionsDic["Sectors"] = None
self.__conditionsDic["Wind speeds"]=[i for i in np.arange(0,30.5,0.5)] #m/s
self.__conditionsDic["Pressure"]= 101325 #pascals
self.__conditionsDic["Air Density [kg/m^3]"]=1.225
self.__conditionsDic["Temperature"]=25 #celcius
self.__conditionsDic["Wind probability"]=[] #how often the wind blows in this sector
self.__conditionsDic["Scale parameter of wind distribution"]=[] # the scale parameter of the wind distribution in the particular sector in m/s
self.__conditionsDic["Shape parameter of wind distribution"]=[] # the shape parameter of the wind distribution in the particular sector
self.windSectors=None
@property
def info(self):
"""
Returns all the defined conditions of the environment.
:param None:
:Example:
>>> dantysk=windfarm("DanTysk")
>>> env=environment("D_Env")
>>> print(env.info.keys())
dict_keys(['Wind directions', 'Sectors', 'Wind speeds', 'Pressure', 'Temperature', 'Wind probability', 'Scale parameter of wind distribution', 'Shape parameter of wind distribution'])
>>> print(env.info['Wind directions']) # doctest:+ELLIPSIS
[0, 1, 2, 3, ...]
>>> print(env.info['Wind speeds']) # doctest:+ELLIPSIS
[0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, ...]
\----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
return self.__conditionsDic
def windConditions(self,windProbability=[1/12 for i in range(12)], aParams=[7 for i in range(12)], kParams=[2.5 for i in range(12)]):
"""
Creates and assigns the given wind conditions to the related environment. Returns the result as a data frame.
Divides the 360 degrees to given number of sectors. By default it divides to 12 sectors and assigns the 12 standard names for every sector e.g. N_0 starts from 346 degrees and ends at 15 degrees.
:param windProbability: [*opt*] the probabiliyt of wind presence in each sector, by default equal to 1/12.
:param aParams: [*opt*] the scale factor of the weibull distribution of the wind in the sector, by default equal to 7 m/s .
:param kParams: [*opt*] the shape factor of the weibull distribution of the wind int the sector, by default equla to 2.5.
:Example:
>>> from PyWinda import pywinda as pw
>>> Env=pw.environment("C_Env2")
\-----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
#TODO check the example of the windConditions purpose.
self.__conditionsDic["Wind probability"]=windProbability
self.__conditionsDic["Scale parameter of wind distribution"]=aParams
self.__conditionsDic["Shape parameter of wind distribution"]=kParams
def makeSectors(self,n=12,sectorNames=["N_0","NNE_30","NEN_60","E_90","ESE_120","SSE_150","S_180","SSW_210","WSW_240","W_270","WNW_300","NNW_330"]):#by default the function will divide the sector in 12 regions
"""
Creates the given sectors for the related environment and returns the result as a data frame.
Divides the 360 degrees into the given number of sectors. By default it creates 12 sectors and assigns the 12 standard names, e.g. N_0 starts at 346 degrees and ends at 15 degrees.
:param n: [*opt*] the number of sectors.
:param sectorNames: [*opt*] names of the sectors given by user or default names for n=12.
:Example:
>>> Env=environment("C_Env2")
>>> print(Env.makeSectors())
N_0 NNE_30 NEN_60 E_90 ... WSW_240 W_270 WNW_300 NNW_330
0 346.0 16.0 46.0 76.0 ... 226.0 256.0 286.0 316.0
1 347.0 17.0 47.0 77.0 ... 227.0 257.0 287.0 317.0
2 348.0 18.0 48.0 78.0 ... 228.0 258.0 288.0 318.0
3 349.0 19.0 49.0 79.0 ... 229.0 259.0 289.0 319.0
4 350.0 20.0 50.0 80.0 ... 230.0 260.0 290.0 320.0
5 351.0 21.0 51.0 81.0 ... 231.0 261.0 291.0 321.0
6 352.0 22.0 52.0 82.0 ... 232.0 262.0 292.0 322.0
7 353.0 23.0 53.0 83.0 ... 233.0 263.0 293.0 323.0
8 354.0 24.0 54.0 84.0 ... 234.0 264.0 294.0 324.0
9 355.0 25.0 55.0 85.0 ... 235.0 265.0 295.0 325.0
10 356.0 26.0 56.0 86.0 ... 236.0 266.0 296.0 326.0
11 357.0 27.0 57.0 87.0 ... 237.0 267.0 297.0 327.0
12 358.0 28.0 58.0 88.0 ... 238.0 268.0 298.0 328.0
13 359.0 29.0 59.0 89.0 ... 239.0 269.0 299.0 329.0
14 0.0 30.0 60.0 90.0 ... 240.0 270.0 300.0 330.0
15 1.0 31.0 61.0 91.0 ... 241.0 271.0 301.0 331.0
16 2.0 32.0 62.0 92.0 ... 242.0 272.0 302.0 332.0
17 3.0 33.0 63.0 93.0 ... 243.0 273.0 303.0 333.0
18 4.0 34.0 64.0 94.0 ... 244.0 274.0 304.0 334.0
19 5.0 35.0 65.0 95.0 ... 245.0 275.0 305.0 335.0
20 6.0 36.0 66.0 96.0 ... 246.0 276.0 306.0 336.0
21 7.0 37.0 67.0 97.0 ... 247.0 277.0 307.0 337.0
22 8.0 38.0 68.0 98.0 ... 248.0 278.0 308.0 338.0
23 9.0 39.0 69.0 99.0 ... 249.0 279.0 309.0 339.0
24 10.0 40.0 70.0 100.0 ... 250.0 280.0 310.0 340.0
25 11.0 41.0 71.0 101.0 ... 251.0 281.0 311.0 341.0
26 12.0 42.0 72.0 102.0 ... 252.0 282.0 312.0 342.0
27 13.0 43.0 73.0 103.0 ... 253.0 283.0 313.0 343.0
28 14.0 44.0 74.0 104.0 ... 254.0 284.0 314.0 344.0
29 15.0 45.0 75.0 105.0 ... 255.0 285.0 315.0 345.0
<BLANKLINE>
[30 rows x 12 columns]
\-----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
sectorSpan = 360 / n
eachS2E=[i for i in np.arange(1 - sectorSpan / 2, 360, sectorSpan)] #this makes a set of starts to end of each sector such that first sector starts from 0+1-sectorSpan / 2 goes to 360 (excluding 360) and the distance between consecutive units is equal to sectorSpan. The +1 makes sure that the sector starts and ends in the correct place. For example sector E_90 with n=12 starts from 90-30+1=61 and ends at 90+30=120
sectorsDic = {}
sectorNamesToReturn=sectorNames #default names; the user can pass custom names as well.
if n!=12: #If n is not 12, the user can either give sectorNames or leave it; if left, names are generated automatically from the centre angle of each sector.
if len(sectorNames)==12:
sectorNamesToReturn = [str(i) for i in np.arange(0,360,sectorSpan)]
elif len(sectorNames)!=12:
sectorNamesToReturn=sectorNames
if n == len(sectorNamesToReturn) and type(n) == int and n > 0: #this makes sure n is an integer and that the number of given sectors is equal to n if defined by user.
for i in range(n):
sectorsDic[sectorNamesToReturn[i]]=[cAngle(temp) for temp in np.arange(eachS2E[i],eachS2E[i+1],1)]
self.windSectors=sectorsDic
self.__conditionsDic["Sectors"]=sectorsDic
return pd.DataFrame(sectorsDic)
else:
raise Exception("Number of sectors and proposed number of names are not equal.")
def probabilityDistribution(self,aParams=[],kParams=[],probabs=[],avgWindSpeeds=[]):
if not (len(aParams) == len(kParams) == len(probabs) == len(avgWindSpeeds) == len(self.windSectors)):
raise Exception("Number of given parameters and existing number of sectors are not equal")
else:
pdDic={}
SectorNames=self.__conditionsDic["Sectors"].keys()
for index,i in enumerate(SectorNames):
pdDic[i]=[aParams[index],kParams[index],probabs[index],avgWindSpeeds[index]]
self.__conditionsDic["probabilityDistribution"]=pdDic
print(pdDic)
# self.__conditionsDic["ProbabilityDistributions"]=pdDic
# print(len(self.windSectors))
def test(self):
return self.uID
class windfarm:
"""
Creates wind farm object with the given unique ID. Pywinda will also create an internal shallow copy of the same windfarm object.
:param uniqueID: [*req*] Unique Id of the wind farm as a string.
:Example:
>>> from PyWinda import pywinda as pw
>>> curslack = pw.windfarm("Curslack_uID")
>>> print(pw.Curslack_uID==curslack)
True
\-----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
created_windfarms=[]
def __init__(self,uniqueID,lifetime=25*365*24*3600):
if uniqueID in windfarm.created_windfarms: #Checks if the wind farm ID is already taken
raise Exception ("The wind farm unique ID [" + str(uniqueID) + "] is already taken.")
else:
if type(uniqueID) == str and len(uniqueID.split())==1:
if uniqueID in globals().keys(): #Checks if the given unique Id is not in conflict with user's already assigned variables.
raise Exception ("Another object with the same uniqe ID globally exists. New wind farm not created.")
else:
globals()[uniqueID] = self #wind farm is dynamicall created and referenced with the unique ID to the users assigned variable.
windfarm.created_windfarms.append(uniqueID) #we append the created wind farm to the list
self.uID=uniqueID
else:
raise Exception("Unique ID should be a string without spaces.")
self.createdSRTs=[] #This list stores the reference names of the single-rotor turbines created in this wind farm
self.createdMRTs=[]
self.farmEnvironment=None #A wind farm will have only one environment
self.__numOfSRT=len(self.createdSRTs)
self.__numOfMRT=len(self.createdMRTs)
self.__allDistances=pd.DataFrame()
self.lifetime=lifetime #by default 25 years in seconds
@property #This helps to protect the info from direct changes by user
def info(self):
"""
Returns a data frame containing all the information about the wind farm.
:param None:
:Example:
>>> from PyWinda import pywinda as pw
>>> curslack=pw.windfarm("uID_Curslack")
>>> WT1=curslack.addTurbine('uID_WT1',turbineType='SRT',hubHeigt=120, x_horizontal=100,y_vertical=100)
>>> WT2=curslack.addTurbine('uID_WT2',turbineType='SRT',hubHeigt=120, x_horizontal=150,y_vertical=150)
>>> WT3=curslack.addTurbine('uID_MWT3',turbineType='MRT',hubHeigt=200, x_horizontal=300,y_vertical=300)
>>> print(curslack.info)
Property Value
0 Unique ID uID_Curslack
1 Created SRTs [uID_WT1, uID_WT2]
2 Created MRTs [uID_MWT3]
3 Number of SRTs 2
4 Number of MRTs 1
\-----------------------------------------------------------------------------------------------------------------------------------------------------------
"""
statistics={"Property":["Unique ID","Created SRTs", "Created MRTs","Number of SRTs","Number of MRTs"],
"Value":[self.uID,self.createdSRTs,self.createdMRTs,self.__numOfSRT,self.__numOfMRT]}
return | pd.DataFrame(statistics) | pandas.DataFrame |
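A small usage sketch of the Weibull helpers defined at the top of this module: the probability of the wind speed falling in a bin follows directly from the cumulative form (the formula is repeated here only so the snippet runs on its own).

import numpy as np

def weib_cumulative(x, A, k):
    return 1 - np.exp(-(x / A) ** k)

A, k = 7.0, 2.5                                   # default sector scale [m/s] and shape
p_5_to_10 = weib_cumulative(10, A, k) - weib_cumulative(5, A, k)
print(round(p_5_to_10, 2))                        # ~0.56: chance of wind between 5 and 10 m/s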
"""
Two types of solvers/optimizers:
1. The first type take in an augmented data set returned by
data_augment, and try to minimize classification error over the
following hypothesis class: { h(X) = 1[ f(x) >= x['theta']] : f in F}
over some real-valued class F.
Input: augmented data set, (X, Y, W)
Output: a model that can predict label Y
These solvers are used with exp_grad
2. The second type simply solves the regression problem
on a data set (x, a, y)
These solvers serve as our unconstrained benchmark methods.
"""
import functools
import numpy as np
import pandas as pd
import random
import data_parser as parser
import data_augment as augment
from gurobipy import *
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, mean_absolute_error, log_loss
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, GradientBoostingRegressor, GradientBoostingClassifier
import xgboost as xgb
import time
_LOGISTIC_C = 5 # Constant for rescaled logisitic loss; might have to
# change for data_augment
# from sklearn.model_selection import train_test_split
"""
Oracles for fair regression algorithm
"""
class SVM_LP_Learner:
"""
Gurobi based cost-sensitive classification oracle
Assume there is a 'theta' field in the X data frame
Oracle=CS; Class=linear
"""
def __init__(self, off_set=0, norm_bdd=1):
self.weights = None
self.norm_bdd = norm_bdd # ell_infinity norm bound on the coefficients (default 1)
self.off_set = off_set
self.name = 'SVM_LP'
def fit(self, X, Y, W):
w = SVM_Gurobi(X, Y, W, self.norm_bdd, self.off_set)
self.weights = pd.Series(w, index=list(X.drop(['theta'], 1)))
def predict(self, X):
y_values = (X.drop(['theta'],
axis=1)).dot(np.array(self.weights))
pred = 1*(y_values - X['theta'] >= 0) # w * x - theta
return pred
class LeastSquaresLearner:
"""
Basic least-squares regression based oracle
Oracle=LS; class=linear
"""
def __init__(self, Theta):
self.weights = None
self.Theta = Theta
self.name = "OLS"
def fit(self, X, Y, W):
matX, vecY = approximate_data(X, Y, W, self.Theta)
self.lsqinfo = np.linalg.lstsq(matX, vecY, rcond=None)
self.weights = pd.Series(self.lsqinfo[0], index=list(matX))
def predict(self, X):
y_values = (X.drop(['theta'],
axis=1)).dot(np.array(self.weights))
pred = 1*(y_values - X['theta'] >= 0) # w * x - theta
return pred
class LogisticRegressionLearner:
"""
Basic logistic regression based oracle
Oracle=LR; Class=linear
"""
def __init__(self, Theta, C=10000, regr=None):
self.Theta = Theta
self.name = "LR"
if regr is None:
self.regr = LogisticRegression(random_state=0, C=C,
max_iter=1200,
fit_intercept=False,
solver='lbfgs')
else:
self.regr = regr
def fit(self, X, Y, W):
matX, vecY, vecW = approx_data_logistic(X, Y, W, self.Theta)
self.regr.fit(matX, vecY, sample_weight=vecW)
pred_prob = self.regr.predict_proba(matX)
def predict(self, X):
pred_prob = self.regr.predict_proba(X.drop(['theta'], axis=1))
prob_values = pd.DataFrame(pred_prob)[1]
y_values = (np.log(1 / prob_values - 1) / (- _LOGISTIC_C) + 1) / 2
# y_values = pd.DataFrame(pred_prob)[1]
pred = 1*(y_values - X['theta'] >= 0) # w * x - theta
return pred
class RF_Classifier_Learner:
"""
Basic RF classifier based CSC
Oracle=LR; Class=Tree ensemble
"""
def __init__(self, Theta):
self.Theta = Theta
self.name = "RF Classifier"
self.clf = RandomForestClassifier(max_depth=4,
random_state=0,
n_estimators=20)
def fit(self, X, Y, W):
matX, vecY, vecW = approx_data_logistic(X, Y, W, self.Theta)
self.clf.fit(matX, vecY, sample_weight=vecW)
def predict(self, X):
pred_prob = self.clf.predict_proba(X.drop(['theta'],
axis=1))
y_values = pd.DataFrame(pred_prob)[1]
pred = 1*(y_values - X['theta'] >= 0)
return pred
class XGB_Classifier_Learner:
"""
Basic GB classifier based oracle
Oracle=LR; Class=Tree ensemble
"""
def __init__(self, Theta, clf=None):
self.Theta = Theta
self.name = "XGB Classifier"
param = {'max_depth' : 3, 'silent' : 1, 'objective' :
'binary:logistic', 'n_estimators' : 150, 'gamma' : 2}
if clf is None:
self.clf = xgb.XGBClassifier(**param)
else:
self.clf = clf
def fit(self, X, Y, W):
matX, vecY, vecW = approx_data_logistic(X, Y, W, self.Theta)
self.clf.fit(matX, vecY, sample_weight=vecW)
def predict(self, X):
pred_prob = self.clf.predict_proba(X.drop(['theta'],
axis=1))
prob_values = pd.DataFrame(pred_prob)[1]
y_values = (np.log(1 / prob_values - 1) / (- _LOGISTIC_C) + 1) / 2
pred = 1*(y_values - X['theta'] >= 0)
return pred
class RF_Regression_Learner:
"""
Basic random forest based oracle
Oracle=LS; Class=Tree ensemble
"""
def __init__(self, Theta):
self.Theta = Theta
self.name = "RF Regression"
self.regr = RandomForestRegressor(max_depth=4, random_state=0,
n_estimators=200)
def fit(self, X, Y, W):
matX, vecY = approximate_data(X, Y, W, self.Theta)
self.regr.fit(matX, vecY)
def predict(self, X):
y_values = self.regr.predict(X.drop(['theta'], axis=1))
pred = 1*(y_values - X['theta'] >= 0) # w * x - theta
return pred
class XGB_Regression_Learner:
"""
Gradient boosting based oracle
Oracle=LS; Class=Tree Ensemble
"""
def __init__(self, Theta):
self.Theta = Theta
self.name = "XGB Regression"
params = {'max_depth': 4, 'silent': 1, 'objective':
'reg:linear', 'n_estimators': 200, 'reg_lambda' : 1,
'gamma':1}
self.regr = xgb.XGBRegressor(**params)
def fit(self, X, Y, W):
matX, vecY = approximate_data(X, Y, W, self.Theta)
self.regr.fit(matX, vecY)
def predict(self, X):
y_values = self.regr.predict(X.drop(['theta'], axis=1))
pred = 1*(y_values - X['theta'] >= 0) # w * x - theta
return pred
# HELPER FUNCTIONS HERE FOR BestH Oracles
def SVM_Gurobi(X, Y, W, norm_bdd, off_set):
"""
Solve the weighted SVM problem using the Gurobi solver.
X: design matrix whose last column is 'theta'
W: example weights
Imposes an ell_infty constraint over the coefficients.
"""
d = len(X.columns) - 1 # number of predictive features (excluding theta)
N = X.shape[0] # number of augmented examples
m = Model()
m.setParam('OutputFlag', 0)
Y_aug = Y.map({1: 1, 0: -1})
# Add a coefficient variable per feature
w = {}
for j in range(d):
w[j] = m.addVar(lb=-norm_bdd, ub=norm_bdd,
vtype=GRB.CONTINUOUS, name="w%d" % j)
w = pd.Series(w)
# Add a threshold value per augmented example
t = {} # threshold values
for i in range(N):
t[i] = m.addVar(lb=0, vtype=GRB.CONTINUOUS, name="t%d" % i)
t = pd.Series(t)
m.update()
for i in range(N):
xi = np.array(X.drop(['theta'], 1).iloc[i])
yi = Y_aug.iloc[i]
theta_i = X['theta'][i]
# Hinge Loss Constraint
m.addConstr(t[i] >= off_set - (w.dot(xi) - theta_i) * yi)
m.setObjective(quicksum(t[i] * W.iloc[i] for i in range(N)))
m.optimize()
weights = np.array([w[i].X for i in range(d)])
return np.array(weights)
def approximate_data(X, Y, W, Theta):
"""
Given the augmented data (X, Y, W), recover for each example the
prediction in Theta + alpha/2 that minimizes the cost;
Thus we reduce the data set back to its original size
"""
n = int(len(X) / len(Theta)) # size of the dataset
alpha = (Theta[1] - Theta[0])/2
x = X.iloc[:n, :].drop(['theta'], 1)
minimizer = {}
pred_vec = {}  # maps each candidate prediction to its 0/1 vector over Theta
for pred in (Theta + alpha):
pred_vec[pred] = (1 * (pred >= pd.Series(Theta)))
for i in range(n):
index_set = [i + j * n for j in range(len(Theta))] # the set of rows for i-th example
W_i = W.iloc[index_set]
Y_i = Y.iloc[index_set]
Y_i.index = range(len(Y_i))
W_i.index = range(len(Y_i))
cost_i = {}
for pred in (Theta + alpha):
cost_i[pred] = abs(Y_i - pred_vec[pred]).dot(W_i)
minimizer[i] = min(cost_i, key=cost_i.get)
return x, pd.Series(minimizer)
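# Worked example of the reduction above (grid values invented for
# illustration): with Theta = [0, 0.25, 0.5, 0.75] the spacing is 0.25, so
# alpha = 0.125 and the candidate predictions are [0.125, 0.375, 0.625, 0.875].
# The candidate 0.625 maps to 1 * (0.625 >= Theta) = [1, 1, 1, 0]; its cost for
# example i is the W_i-weighted L1 distance between that vector and the four
# Y values of example i, and the cheapest candidate becomes the regression
# target stored in pd.Series(minimizer).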
def approx_data_logistic(X, Y, W, Theta):
"""
Given the augmented data (X, Y, W), recover for each example the
prediction in Theta + alpha (alpha is half the grid spacing) that minimizes the cost;
then create a pair of weighted examples so that the predicted probability
minimizes the log loss.
"""
n = int(len(X) / len(Theta)) # size of the dataset
alpha = (Theta[1] - Theta[0])/2
x = X.iloc[:n, :].drop(['theta'], 1)
pred_vec = {} # mapping theta to pred vector
Theta_mid = [0] + list(Theta + alpha) + [1]
Theta_mid = list(filter(lambda x: x >= 0, Theta_mid))
Theta_mid = list(filter(lambda x: x <= 1, Theta_mid))
for pred in Theta_mid:
pred_vec[pred] = (1 * (pred >= pd.Series(Theta)))
minimizer = {}
for i in range(n):
index_set = [i + j * n for j in range(len(Theta))] # the set of rows for i-th example
W_i = W.iloc[index_set]
Y_i = Y.iloc[index_set]
Y_i.index = range(len(Y_i))
W_i.index = range(len(Y_i))
cost_i = {}
for pred in Theta_mid: # enumerate different possible
# predictions
cost_i[pred] = abs(Y_i - pred_vec[pred]).dot(W_i)
minimizer[i] = min(cost_i, key=cost_i.get)
matX = pd.concat([x]*2, ignore_index=True)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 15 14:09:59 2017
@author: SaintlyVi
"""
import pandas as pd
import numpy as np
from support import writeLog
def uncertaintyStats(submodel):
"""
Creates a dataframe of summary statistics for observed hourly profiles for a given year.
Use evaluation.evalhelpers.observedHourlyProfiles() to generate the input dataframe.
"""
allstats = list()
for c in submodel['class'].unique():
stats = submodel[submodel['class']==c].describe()
stats['customer_class'] = c
stats.reset_index(inplace=True)
stats.set_index(['customer_class','index'], inplace=True)
allstats.append(stats)
df = pd.concat(allstats)
return df[['AnswerID_count','valid_obs_ratio']]
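# Minimal usage sketch (illustrative; the class label is invented). The input
# needs 'class', 'AnswerID_count' and 'valid_obs_ratio' columns, e.g. the
# output of evaluation.evalhelpers.observedHourlyProfiles():
#
# stats = uncertaintyStats(observed_hourly_profiles)
# stats.loc[('rural', 'mean')]   # mean AnswerID_count / valid_obs_ratio for that class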
def dataIntegrity(submodels, min_answerid, min_obsratio):
"""
This function returns the slice of submodels that meet the specified minimum uncertainty requirements. Submodels must form part of the same experiment (eg demand summary and hourly profiles).
"""
if isinstance(submodels, list):
models = submodels
else:
models = [submodels]
validmodels = pd.DataFrame(columns = ['submodel_name','valid_data','uncertainty_index',
'valid_unit_count', 'unit'])
for m in models:
name = m.name
valid_data = m[(m.AnswerID_count>=min_answerid) & (m.valid_obs_ratio>=min_obsratio)]
uix = len(valid_data) / len(m)
try:
valid_unit_count = valid_data['valid_hours'].sum()
unit = 'total_valid_hours'
except KeyError:
valid_unit_count = valid_data['AnswerID_count'].sum()
unit = 'valid_AnswerID_count'
validmodels = validmodels.append({'submodel_name':name,
'valid_data':valid_data,
'uncertainty_index':uix,
'valid_unit_count':valid_unit_count,
'unit':unit}, ignore_index=True)
validmodels.set_index('submodel_name', drop=True, inplace=True)
return validmodels
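# Quick illustration of the threshold logic above (numbers invented):
#
# hp = pd.DataFrame({'AnswerID_count': [5, 1, 8],
#                    'valid_obs_ratio': [0.9, 0.95, 0.7]})
# hp.name = 'hourly_profiles'
# out = dataIntegrity(hp, min_answerid=2, min_obsratio=0.85)
# out.loc['hourly_profiles', 'uncertainty_index']   # -> 1/3, only the first row passes both cuts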
def modelSimilarity(ex_submodel, ex_ts, valid_new_submodel, new_ts, submod_type):
"""
This function calculates the evaluation measure for the run.
ex_submodel = (DataFrame) either existing/expert demand_summary or hourly_profiles submodel
valid_new_submodel = (DataFrame) output from dataIntegrity function
-> only want to compare valid data
submod_type = (str) one of [ds, hp]
-> ds=demand_summary, hp=hourly_profiles
"""
if submod_type == 'ds':
index_cols = ['class','YearsElectrified']
elif submod_type == 'hp':
index_cols = ['class','YearsElectrified','month','daytype','hour']
else:
return(print('Valid submod_type is one of [ds, hp] -> ds=demand_summary, hp=hourly_profiles.'))
merged_sub = ex_submodel.merge(valid_new_submodel, how='left', on=index_cols)
simvec = merged_sub[new_ts] - merged_sub[ex_ts]
simvec.dropna(inplace=True)
simveccount = len(simvec)
eucliddist = np.sqrt(sum(simvec**2))
return eucliddist, simveccount, merged_sub
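# The similarity score returned above is the Euclidean distance between the
# two submodels over the rows they share after the merge,
#     eucliddist = sqrt( sum_k (new_ts_k - ex_ts_k)**2 ),
# computed only over non-NaN pairs; simveccount records how many pairs
# contributed, which matters when runs have different coverage.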
def logCalibration(bm_model, year, exp_model, min_answerid = 2, min_obsratio = 0.85):
"""
This function logs the evaluation results of the run.
ex_model = [demand_summary, hourly_profiles, ds_val_col_name, hp_val_col_name]
"""
#Generate data model
ods = pd.read_csv('data/experimental_model/'+exp_model+'/demand_summary_'+year+'.csv')
ohp = pd.read_csv('data/experimental_model/'+exp_model+'/hourly_profiles_'+year+'.csv')
# -*- coding: utf-8 -*-
"""
@author: <NAME> - https://www.linkedin.com/in/adamrvfisher/
"""
#This is a summary statistic + database query tool
#pandas_datareader is deprecated, use YahooGrabber
#Import modules
import numpy as np
import pandas.io.data as web
import pandas as pd
#Define function
def SDSD(s):
#Request data
s = web.get_data_yahoo(s, start='1/1/1900', end='01/01/2018')
import requests
import pandas as pd
import geopandas as gpd
import glob
import re
import numpy as np
import os
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
from bs4 import BeautifulSoup
from multiprocessing import Pool
import sqlite3
import schedule
import time
import datetime
import pytz
# Functions
def simple_get(url):
"""
Attempts to get the content at `url` by making an HTTP GET request.
If the content-type of response is some kind of HTML/XML, return the
text content, otherwise return None.
"""
try:
with closing(get(url, stream=True)) as resp:
if is_good_response(resp):
return resp.content
else:
return None
except RequestException as e:
log_error('Error during requests to {0} : {1}'.format(url, str(e)))
return None
def is_good_response(resp):
"""
Returns True if the response seems to be HTML, False otherwise.
"""
content_type = resp.headers['Content-Type'].lower()
return (resp.status_code == 200
and content_type is not None
and content_type.find('html') > -1)
def log_error(e):
"""
It is always a good idea to log errors.
This function just prints them, but you can
make it do anything.
"""
print(e)
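# Sketch of the fetch-and-parse pattern this script relies on (illustrative;
# region code "05" is just one of the codes iterated over in CEHQ() below):
#
# raw = simple_get('https://www.cehq.gouv.qc.ca/hydrometrie/'
#                  'historique_donnees/ListeStation.asp?regionhydro=05&Tri=Non')
# if raw is not None:
#     stations = BeautifulSoup(raw, 'html.parser').select('area')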
def function_requests(nom_fichier):
print(nom_fichier)
CEHQ_URL = "https://www.cehq.gouv.qc.ca/depot/historique_donnees/fichier/"
rq = requests.get(CEHQ_URL + os.path.basename(nom_fichier[0].strip()) + '_Q.txt') # create HTTP response object
if rq.status_code == 200:
with open(nom_fichier[0].strip() + '_Q.txt', 'wb') as f:
f.write(rq.content)
rn = requests.get(CEHQ_URL + os.path.basename(nom_fichier[0].strip()) + '_N.txt') # create HTTP response object
if rn.status_code == 200:
with open(nom_fichier[0].strip() + '_N.txt', 'wb') as f:
f.write(rn.content)
def CEHQ(f, DATA_PATH, DB_PATH, shp_path):
print('##########################################################')
print('[' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '] : NEW JOB STARTED: CEHQ update')
print('##########################################################')
ORIGINAL_PATH = 'https://www.cehq.gouv.qc.ca/hydrometrie/historique_donnees/ListeStation.asp?regionhydro=$&Tri=Non'
nb_region = ["%02d" % n for n in range(0, 13)]
regions = []
print('[' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '] Getting all available stations...')
for reg in nb_region:
path = ORIGINAL_PATH.replace('$', reg)
raw_html = simple_get(path)
html = BeautifulSoup(raw_html, 'html.parser')
for li in (html.select('area')):
if li['href'].find('NoStation')>0:
a = li['title'].split('-',1)
a[0] = a[0].strip()
a[0] = DATA_PATH + '/' + a[0]
regions.append(a)
print('[' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '] '+ str(len(regions)) + ' available stations...')
# URL Request
# If running in parallel:
# with Pool(8) as p:
#     p.map(f, regions)
# Otherwise:
# for nom_fichier in regions:
# f(nom_fichier, DATA_PATH)
print('[' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '] Getting all available stations...done')
print('[' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '] Parsing all available stations')
# Parsing
listeStations = glob.glob(DATA_PATH + "/*.txt")
# Validate or correct file
def del_if_2_cols(lines):
if (len(lines[0].split()))!=4:
lines.pop(0)
del_if_2_cols(lines)
return lines
encoding = "ISO-8859-1"  # same encoding is used on every platform
for file in listeStations:
with open(file, 'r', encoding=encoding) as f:
head = f.readlines()[0:21]
f.close()
with open(file, 'r', encoding=encoding) as f:
lines = f.readlines()[22:]
f.close()
if lines and (len(lines[0].split())) != 4:
print(os.path.splitext(os.path.basename(file))[0])
del_if_2_cols(lines)
text_fin = head + lines
with open(file, "w", encoding=encoding)as f:
f.write(''.join(map(str, text_fin)))
f.close()
list_coords = []
list_sup = []
list_stations = []
list_type = []
list_id = []
liste_nom = []
liste_regime = []
liste_regions = np.array([os.path.basename(file) for file in np.array(regions)[:, 0]])
# Keep only the stations for which a shapefile is available.
# More restrictive, but cleaner for maintaining a robust database.
basename_listeStations = [ os.path.basename(x).split('.')[0].split("_")[0] for x in listeStations]
print(basename_listeStations)
print(len(basename_listeStations))
lst_shp = sorted(glob.glob(shp_path + '/*/*.shp'))
basename_lst_shp = [os.path.basename(x).split('.')[0].split("_")[0] for x in lst_shp]
basename_lst_shp = ([*{*basename_lst_shp}])
print(lst_shp)
print(len(basename_lst_shp))
available_stations = sorted(list(set(basename_listeStations).intersection(basename_lst_shp)))
print(available_stations)
print(len(available_stations))
listOfIndices_unique = [basename_listeStations.index(key) for key in available_stations]
print(listOfIndices_unique)
listOfIndices_unique = [basename_listeStations.index(key) for key in available_stations]
print(listOfIndices_unique)
print(len(listOfIndices_unique))
listOfIndices_not_unique = [i for y in available_stations for i, x in enumerate(basename_listeStations) if x == y]
print(listOfIndices_not_unique)
print(len(listOfIndices_not_unique))
for file in [listeStations[i] for i in listOfIndices_not_unique]:
with open(file, encoding=encoding) as f:
lines = f.readlines()
type_var = os.path.basename(file).replace('.', '_').split('_')[1]
if type_var == 'Q':
list_type.append('Debit')
else:
list_type.append('Niveau')
stations = lines[2]
itemindex = np.where(liste_regions == stations.split()[1])
nom_long = regions[itemindex[0][0]][-1].strip()
liste_nom.append(nom_long)
type_var = os.path.basename(file).replace('.', '_').split('_')[1]
liste_regime.append(lines[3][15:].split()[-1])
list_stations.append(stations.split()[1])
list_id.append(stations.split()[1] + '_' + type_var)
superficie = float(lines[3][15:].split()[0])
coords = lines[4][22:-2].split()
list_sup.append(superficie)
if len(coords) < 5:
coords = [x for x in coords if x]
list_coords.append([float(coords[0]), float(coords[2])])
elif len(coords) >= 5:
coords = [re.sub(r'[^-\d]', '', coord) for coord in coords]
coords = [x for x in coords if x]
list_coords.append([float(coords[0]) + float(coords[1]) / 60 + float(coords[2]) / 3600,
float(coords[3]) - float(coords[4]) / 60 - float(coords[5]) / 3600])
else:
print(file)
print('[' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '] Parsing all available stations...done')
print('[' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '] Making dataframes...')
df_sup1 = pd.DataFrame(np.array([list_id, list_stations, liste_nom, list_type, liste_regime,list_coords, list_sup]).T,
columns=['STATION_ID', 'NUMERO_STATION', 'NOM_STATION',
'TYPE_SERIE', 'REGIME', 'COORDS', 'SUPERFICIE'])
# Import all the .txt files into a dictionary of dataframes
fields = ['DATE','VALUE']
dict_df = {os.path.splitext(os.path.basename(station))[0] :
pd.read_csv(station, skiprows=22,delim_whitespace=True,
usecols=[1, 2], names=fields, header=None,
encoding='latin1').fillna(np.nan)
for station in [listeStations[i] for i in listOfIndices_not_unique]}
key_to_delete = []
for key, value in dict_df.items():
value['VALUE'] = value['VALUE'].map(lambda x: float(str(x).lstrip('+-').rstrip('aAbBcC')))
value = value.set_index('DATE')
value.index = pd.to_datetime(value.index)
value.index = value.index.tz_localize("Etc/GMT+5", ambiguous='infer', nonexistent='shift_forward')
value.index = value.index.tz_convert("America/Montreal")
value['STATION_ID'] = key
dict_df[key] = value
if len(value['VALUE'].dropna()) > 0:
debut = value['VALUE'].dropna().index[0]
fin = value['VALUE'].dropna().index[-1]
df_sup1.loc[df_sup1.index[df_sup1['STATION_ID'] == key], 'DATE_DEBUT'] = debut
df_sup1.loc[df_sup1.index[df_sup1['STATION_ID'] == key], 'DATE_FIN'] = fin
else:
df_sup1.drop(df_sup1.index[df_sup1['STATION_ID'] == key], inplace=True)
key_to_delete.append(key)
print(key_to_delete)
for k in key_to_delete:
dict_df.pop(k, None)
df = pd.concat(dict_df.values())
df.reset_index(level=0, inplace=True)
# Index dataframe
df_sup1 = df_sup1.sort_values(by=['NUMERO_STATION']).reset_index().drop(['index'], axis=1)
df_sup1['LATITUDE'], df_sup1['LONGITUDE'] = df_sup1['COORDS'].map(lambda x: ':'.join(list(map(str, x)))).str.split(':', 1).str
df_sup1 = df_sup1.drop('COORDS', axis=1)
meta_sta_hydro = df_sup1.drop(columns=['STATION_ID','TYPE_SERIE', 'DATE_DEBUT', 'DATE_FIN'])
meta_sta_hydro = meta_sta_hydro.drop_duplicates()
meta_sta_hydro.insert(loc=2, column='PROVINCE', value='QC')
meta_sta_hydro.insert(loc=0, column='ID_POINT', value=range(1000, 1000 + meta_sta_hydro.shape[0], 1))
gdf_json = [gpd.read_file(shp).to_json() for shp in [shp_path + '/' + s + '/' + s + '.shp'
for s in available_stations]]
meta_sta_hydro.insert(loc=8, column='GEOM', value=gdf_json)
meta_sta_hydro.insert(loc=3, column='NOM_EQUIV', value=np.nan)
meta_ts = df_sup1.drop(columns = ['NOM_STATION','REGIME','SUPERFICIE','LATITUDE','LONGITUDE'])
meta_ts.insert(loc=3, column='PAS_DE_TEMPS', value='1_J')
meta_ts.insert(loc=4, column='AGGREGATION', value='moy')
meta_ts.insert(loc=5, column='UNITE', value='m3/s')
meta_ts.insert(loc=8, column='SOURCE', value='CEHQ')
meta_ts = pd.merge(meta_ts, meta_sta_hydro[['ID_POINT', 'NUMERO_STATION']],
left_on='NUMERO_STATION', right_on='NUMERO_STATION', how='left').drop(columns=['NUMERO_STATION'])
cols = meta_ts.columns.tolist()
cols = cols[-1:] + cols[:-1]
meta_ts = meta_ts[cols]
meta_ts.insert(loc=0, column='ID_SERIE', value=range(1000, 1000 + meta_ts.shape[0], 1))
df = pd.merge(df, meta_ts[['ID_SERIE', 'STATION_ID']],
left_on='STATION_ID', right_on='STATION_ID', how='left').drop(columns=['STATION_ID'])
cols = df.columns.tolist()
cols = cols[-1:] + cols[:-1]
df = df[cols]
meta_ts = meta_ts.drop(columns=['STATION_ID'])
meta_ts['DATE_DEBUT'] = pd.to_datetime(meta_ts['DATE_DEBUT'])
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
import sys
import pandas as pd
import numpy as np
from pandas_profiling import ProfileReport
def test_example(get_data_file, test_output_dir):
file_name = get_data_file(
"meteorites.csv",
"https://data.nasa.gov/api/views/gh4g-9sfh/rows.csv?accessType=DOWNLOAD",
)
df = pd.read_csv(file_name)
import collections
import json
import os
from datetime import time
import random
from tqdm import tqdm
from main import cvtCsvDataframe
import pickle
import numpy as np
import pandas as pd
import random
import networkx as nx
import time
from main import FPGrowth
from shopping import Shopping, Cell
import main
# QoL for display
pd.set_option('display.max_columns', 30)
def encodeData():
df = pd.read_csv('products.txt', delimiter="\t")
dataHere = df['Nome'].str.strip()
indexes = [x for x in range(0,len(dataHere))]
df['ID'] = indexes
#print(data.to_numpy())
return df
products = encodeData()
'''
Simulates N shopping trips given test wishlists and staminas.
1 - Create a shopping with the given configuration
2 - Generate N random wishlists and their stamina
3 - Simulate each one and save the results
4 - Analyse the supermarket profit
'''
class SoS:
def __init__(self, configuration, staminaDistr,explanations):
self.shoppingClass = Shopping([23,21],configuration)
#self.shoppingClass.changeShoppingConfig(configuration)
self.shopping = self.shoppingClass.shopping
self.staminaDistr = staminaDistr
self.explanations = explanations
self.auxNeighbors = self.getAuxNeighbors()
self.auxNeighborsPrimary = self.getAuxNeighborsPrimary()
data, explanations = cvtCsvDataframe(pd.read_csv("data.csv"), pd.read_csv("explanations.csv"))
mergedReceiptExplanations = pd.merge(data, explanations, on='receiptID', how='outer')
self.boughtAndWishlist = mergedReceiptExplanations[['PRODUCTS', 'WISHLIST']].to_numpy()
def generateCustomers(self, samples):
'''
:return: Returns a sample of random customers with stamina and wishlist
'''
customers = []
wishlists = list(self.explanations['WISHLIST'].to_numpy())
randomWishlists = random.sample(wishlists,samples)
staminas = self.staminaDistr.sample(samples)
for i, j in zip(randomWishlists,staminas):
customers.append((i,int(j)))
return customers
def findNeighbors(self, currentCell, typeSearch):
'''
:param currentCell: Current cell to search
:param typeSearch: Type of search 1 - Halls 2- Shelves
:return: Return the neighbors
'''
neighbors = []
try:
#If there are neighbors in the top
if currentCell[0] > 0:
#Get the top neighbor
neighbors.append(self.shopping[currentCell[0] - 1][currentCell[1]].id)
#If there are neighbors on the left
if currentCell[1] > 0:
neighbors.append(self.shopping[currentCell[0]][currentCell[1] - 1].id)
#If there are neighbors on the right
if currentCell[1] < self.shopping.shape[1] - 1:
neighbors.append(self.shopping[currentCell[0]][currentCell[1] + 1].id)
#If there are neighbors on the bottom
if currentCell[0] < self.shopping.shape[0] - 1:
neighbors.append(self.shopping[currentCell[0] + 1][currentCell[1]].id)
except:
pass
aux = []
if typeSearch == 1:
notToAdd = [1,461,483,23]
for i in neighbors:
if i not in self.shoppingClass.config and i not in notToAdd:
aux.append(i)
else:
notToAdd = [1, 461, 483, 23]
for i in neighbors:
if i in self.shoppingClass.config and i not in notToAdd:
aux.append(i)
return aux
def findClosestProduct(self, item):
'''
:param item: Receives an item to search for
:return: Returns the closest product path there is
'''
size = self.shopping.shape
allPathsToItem = []
for j in range(size[1]):
for i in range(size[0]):
if self.shopping[i][j].product == item:
pathsToThisCell = self.auxNeighborsPrimary[f"[{i},{j}]"]
for s in pathsToThisCell: allPathsToItem.append(s)
pathsLenght = []
paths = []
for possiblePath in allPathsToItem:
paths.append(nx.dijkstra_path(self.shoppingClass.graphShopping, self.shoppingClass.entrance, possiblePath))
pathsLenght.append(len(nx.dijkstra_path(self.shoppingClass.graphShopping, self.shoppingClass.entrance, possiblePath)))
#Return the minimium path
return paths[np.argmin(pathsLenght)]
def getAuxNeighborsPrimary(self):
aux = {}
size = self.shopping.shape
for j in range(size[1]):
for i in range(size[0]):
aux[f"[{i},{j}]"] = self.findNeighbors([i, j], 1)
return aux
def getAuxNeighbors(self):
aux = {}
size = self.shopping.shape
for j in range(size[1]):
for i in range(size[0]):
aux[f"[{i},{j}]"] = self.findNeighbors([i, j], 2)
return aux
def getCellProducts(self, cell):
size = self.shopping.shape
for j in range(size[1]):
for i in range(size[0]):
if self.shopping[i][j].id == cell:
cells = self.auxNeighbors[f"[{i},{j}]"]
products = []
for c in cells:
products.append(self.shoppingClass.productsAux[c])
return products
def getProbabilityOfPicking(self, product):
#Check if the file already exists
if os.path.exists("probabilityBuy.p"): probToBuy = pickle.load(open("probabilityBuy.p","rb"))
#Otherwise write it
else:
# organize_data()
# Read the csv file and convert it to a well formatted dataframe
aux = {}
#For each receipt
for p in tqdm(self.boughtAndWishlist):
#go through the products bought
for i in p[0]:
if i not in list(aux.keys()):
aux[i] = {'NotIn': 0, 'Counter':0}
#Increase counter
aux[i]['Counter'] += 1
#If the product bought is not in the wishlist
if i not in p[1]:
#Increase counter of times that the product was bought and was not in the wishlist
aux[i]['NotIn'] += 1
probToBuy = {}
for k in aux:
probToBuy[k] = aux[k]['NotIn'] / aux[k]['Counter']
pickle.dump(probToBuy,open("probabilityBuy.p","wb"))
#Reutrn the respective probability
return probToBuy[product]
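# Worked example of the estimate above (counts invented): if a product was
# bought 40 times in total and 10 of those receipts did not have it in the
# wishlist, the stored probability is 10/40 = 0.25. During simulation this
# value is used as the chance of an impulse purchase each time a customer
# passes a shelf holding that product.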
def simulateCustomers(self,customers):
'''
:param customers: Receives a list of customers
:return: Returns the simulation results
'''
sales = []
#For each customer
for customer in tqdm(customers):
currentWishlist = customer[0]
currentWishlist.reverse()
currentStamina = customer[1]
productsBought = []
#print(f"Customer wishlist: {currentWishlist}")
#While the customer still has products they want and still has stamina, keep simulating
while len(currentWishlist) > 0 and currentStamina > 0:
item = currentWishlist[0]
#print(f"Looking for {products.loc[products['ID'] == item, 'Nome'].iloc[0]}")
closest = self.findClosestProduct(item)
#print(f"Found {products.loc[products['ID'] == item, 'Nome'].iloc[0]} on cell {closest[-1]}")
for cell in range(len(closest)):
#print(f"I am on cell {closest[cell]}")
prodcutsCloseToCell = self.getCellProducts(closest[cell])
for prod in prodcutsCloseToCell:
#If the product is in the wishlist then buy it
if prod in currentWishlist:
#print(f"Found {products.loc[products['ID'] == prod, 'Nome'].iloc[0]} which was in my wishlist, so I bought it.")
#Remove it from the wishlist
currentWishlist.remove(prod)
productsBought.append(prod)
#Otherwise calculate the probability of buying it
else:
#Probability of this product being picked without being in the wishlist
prob = self.getProbabilityOfPicking(prod)
#Random probability
randomProb = random.uniform(0,1)
#If it is bought
if randomProb <= prob:
productsBought.append(prod)
#print(f"Felt like buying {products.loc[products['ID'] == prod, 'Nome'].iloc[0]}, so I bought it.")
currentStamina -= 1
#print(f"Current stamina : {currentStamina}")
#Scenarios that the person leaves the shopping
if currentStamina <= 0:
#print("I got tired!")
break
elif len(currentWishlist) <= 0:
#print("Bought everything!")
break
sales.append(productsBought)
return sales
def evaluateShoppingCost(self, sales):
'''
:param sales: Receives a list of sales from customers
:return: Returns the calculated profit for those sales
'''
totalProfit = 0
for sale in tqdm(sales):
for product in sale:
totalProfit += (products.loc[products['ID'] == product, 'Preço'].iloc[0] / products.loc[products['ID'] == product, '<NAME>'].iloc[0])
return totalProfit
def generateSimulator(config):
#QoL for display
pd.set_option('display.max_columns', 30)
data, explanations = main.cvtCsvDataframe(pd.read_csv("data.csv"), pd.read_csv("explanations.csv"))
mergedReceiptExplanations = pd.merge(data, explanations, on='receiptID', how='outer')
simulator = SoS(config,main.obtainStaminaDistribution(mergedReceiptExplanations['DISTANCE'].to_numpy()), explanations)
return simulator
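# End-to-end usage sketch (illustrative; `config` is whatever list of shelf
# cell ids the experiment assigns, and the CSV files read by
# generateSimulator() must be present):
#
# sim = generateSimulator(config)
# customers = sim.generateCustomers(100)     # [(wishlist, stamina), ...]
# sales = sim.simulateCustomers(customers)
# profit = sim.evaluateShoppingCost(sales)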
def orderProductsPerImportanceAndProfit(shop):
#Order products
ordered = products.sort_values(by=['<NAME>'], ascending=True)
ordered = ordered['ID'].to_numpy()
aux = []
for p in ordered:
for _ in range(products.loc[products['ID'] == p,'Total Prateleiras'].iloc[0]):
aux.append(p)
size = [23,21]
ranksShelves = {}
#Order importance cells
for j in range(size[1]):
for i in range(size[0]):
ranksShelves[shop.shopping[i][j].id] = shop.shopping[i][j].rank
ranksShelves = dict(sorted(ranksShelves.items(), key=lambda item: item[1]))
indice = 0
for i in ranksShelves.keys():
if i in shop.shoppingClass.config:
ranksShelves[i] = int(aux[indice])
indice += 1
with open("profitImportance.json","w") as f:
json.dump(ranksShelves,f)
return ranksShelves
def orderProductsPerPair(shop):
data, explanations = cvtCsvDataframe(pd.read_csv("data.csv"), pd.read_csv("explanations.csv"))
if os.path.exists("productPair.csv"):
dfAux = pd.read_csv("productPair.csv")
shelves = dfAux['shelve'].to_numpy()
products_aux = dfAux['product'].to_numpy()
dictionary = {}
for i, j in zip(shelves,products_aux):
dictionary[i] = j
return dict(collections.OrderedDict(sorted(dictionary.items())))
else:
#Order products
ordered = products.sort_values(by=['<NAME>'], ascending=True)
ordered = ordered['ID'].to_numpy()
aux = []
for p in ordered:
for _ in range(products.loc[products['ID'] == p,'Total Prateleiras'].iloc[0]):
aux.append(p)
size = [23,21]
ranksShelves = {}
auxRankShelves = {}
#Order importance cells
for j in range(size[1]):
for i in range(size[0]):
ranksShelves[shop.shopping[i][j].id] = shop.shopping[i][j].rank
auxRankShelves[shop.shopping[i][j].id] = False
ranksShelves = dict(sorted(ranksShelves.items(), key=lambda item: item[1]))
indice = 0
pairShelves1 = [[6,7,8,9],
[185,208,231,254],
[215,216,217,218],
[470,471,472,473],
[250,273,296,251]]
resultsFP = FPGrowth(data,0.6,0.5)
resultsFP = [list(s) for s in resultsFP]
for shelves in pairShelves1:
counter = 0
for shelf in shelves:
try:
sample = random.sample(resultsFP[counter],1)[0]
ranksShelves[shelf] = sample
aux.remove(sample)
auxRankShelves[shelf] = True
except:
pass
counter+=1
#First place the products pairs
for i in ranksShelves.keys():
if i in shop.shoppingClass.config:
if not auxRankShelves[i]:
ranksShelves[i] = int(aux[indice])
indice += 1
dfAux = pd.Series(ranksShelves)
import pandas as pd
import plotly.tools as tls
from plotly.offline import iplot
class backtest:
def __init__(self, base, close, start, period, positions_dict):
if isinstance(close, pd.Series):
close = pd.DataFrame(close)
start = pd.to_datetime(start)
self.dc = dca(base, close, start, period)
self.bh = buyandhold(base, close, start, period)
self.strats = dict()
for ind, values in positions_dict.items():
self.strats[ind] = strategy(base, close, values, start, period)
def get_strat_stats(self, name):
if name == "DollarCostAvg":
return self.dc
elif name == "BuyandHold":
return self.bh
else:
return self.strats[name]
def get_values(self):
all_values = pd.concat([self.dc.TotalValue, self.bh.TotalValue], axis=1)
all_values.columns = ["DollarCostAvg", "BuyandHold"]
for i in self.strats.keys():
all_values[i] = self.strats[i].TotalValue
return all_values
def get_plot(self):
d1 = self.get_values()
d1.sort_index(ascending=True, inplace=True)
fig = tls.make_subplots(rows=2, cols=1, shared_xaxes=True)
close = pd.DataFrame(self.dc.Close)
#
# Prepare of the hvorg screenshots
#
import os
import pickle
import json
import numpy as np
import pandas as pd
from sunpy.time import parse_time, is_time
# The sources ids
get_sources_ids = 'getDataSources.json'
# Save the data
save_directory = os.path.expanduser('~/Data/hvanalysis/derived')
# Read in the data
directory = os.path.expanduser('~/Data/hvanalysis/source')
hvorg_screenshots = 'screenshots.csv'
# hvorg_screenshots = 'screenshots_test.csv'
f = os.path.join(directory, hvorg_screenshots)
print('Loading ' + f)
path = os.path.expanduser(f)
df = pd.read_csv(path)
hvorg_screenshots_legacy = 'screenshots_legacy.csv'
# hvorg_screenshots_legacy = 'screenshots_test.csv'
f = os.path.join(directory, hvorg_screenshots_legacy)
print('Loading ' + f)
path = os.path.expanduser(f)
df_legacy = pd.read_csv(path)
# -*- coding: utf-8 -*-
"""
Plotting utilities.
"""
import logging
from matplotlib import pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
from .utils import auto_bins
sns.set()
sns.set_style('ticks')
logger = logging.getLogger(__name__)
pairplot_kwargs = dict(corner=True, kind='scatter',
diag_kws=dict(histtype='step', bins='auto', lw=1.5,
density=True, color='teal'),
plot_kws=dict(s=1.0, edgecolor=None, palette='viridis',
color='teal'))
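# These defaults are handed to seaborn's pairplot by the helpers in this
# module; individual calls may override any entry, e.g. (the field name
# 'logL' is only an example of a structured-array field to colour by):
#
# plot_live_points(live_points, c='logL', plot_kws=dict(s=3.0, palette='magma'))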
def plot_live_points(live_points, filename=None, bounds=None, c=None,
**kwargs):
"""
Plot a set of live points in a corner-like plot.
Parameters
----------
live_points : ndarray
Structured array of live points to plot.
filename : str
Filename for resulting figure
bounds : dict:
Dictionary of lower and upper bounds to plot
c : str, optional
Name of field in the structured array to use as the hue when plotting
the samples. If not specified, no hue is used.
kwargs :
Keyword arguments used to update the pairplot kwargs. Diagonal and off-
diagonal plots can be configured with ``diag_kws`` and ``plot_kws``.
"""
pairplot_kwargs.update(kwargs)
df = pd.DataFrame(live_points)
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
"""
Functions for writing a directory for iModulonDB webpages
"""
import logging
import os
import re
from itertools import chain
from zipfile import ZipFile
import numpy as np
import pandas as pd
from matplotlib.colors import to_hex
from tqdm.notebook import tqdm
from pymodulon.plotting import _broken_line, _get_fit, _solid_line
##################
# User Functions #
##################
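# Typical call pattern for the check defined below (illustrative; `ica_data`
# is any loaded IcaData object and the complex mapping is the example from
# the docstring):
#
# issues = imodulondb_compatibility(ica_data, tfcomplex_to_gene={"FlhDC": "flhD"})
# table_issues, tf_issues, missing_g_links, missing_DOIs = issues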
def imodulondb_compatibility(model, inplace=False, tfcomplex_to_gene=None):
"""
Checks for all issues and missing information prior to exporting to iModulonDB.
If inplace = True, modifies the model (not recommended for main model variables).
Parameters
----------
model: :class:`~pymodulon.core.IcaData`
IcaData object to check
inplace: bool, optional
If true, modifies the model to prepare for export.
Not recommended for use with your main model variable.
tfcomplex_to_gene: dict, optional
dictionary pointing complex TRN entries to matching gene names in the gene
table (ex: {"FlhDC":"flhD"})
Returns
-------
table_issues: pd.DataFrame
Each row corresponds to an issue with one of the main class elements.
Columns:
* Table: which table or other variable the issue is in
* Missing Column: the column of the Table with the issue (case insensitive).
* Solution: Unless "CRITICAL" is in this cell, this describes how the
site will behave if the issue remains unaddressed.
tf_issues: pd.DataFrame
Each row corresponds to a regulator that is used in the imodulon_table.
Columns:
* in_trn: whether the regulator is in the model.trn. Regulators not
in the TRN will be ignored in the site's histograms and gene tables.
* has_link: whether the regulator has a link in tf_links. If not, no
link to external regulator databases will be shown.
* has_gene: whether the regulator can be matched to a gene in the model.
If this is false, then there will be no regulator scatter plot on the
site. You can link TF complexes to one of their genes using the
tfcomplex_to_gene input.
missing_g_links: pd.Series
The genes on this list don't have links in gene_links, so their gene pages
will not display links.
missing_DOIs: pd.Series
The samples listed here don't have DOIs in the sample_table. Clicking on their
associated bars in the activity plots will not link to relevant papers.
"""
if tfcomplex_to_gene is None:
tfcomplex_to_gene = {}
table_issues = pd.DataFrame(columns=["Table", "Missing Column", "Solution"])
# Check for X
if model.X is None:
table_issues = table_issues.append(
{
"Table": "X",
"Missing Column": "all",
"Solution": "CRITICAL. Add the expression matrix"
" so that gene pages can be generated.",
},
ignore_index=True,
)
logging.warning("Critical issue: No X matrix")
# Check for updated imodulondb table
default_imdb_table = {
"organism": "New Organism",
"dataset": "New Dataset",
"strain": "Unspecified",
"publication_name": "Unpublished Study",
"publication_link": "",
"gene_link_db": "External Database",
"organism_folder": "new_organism",
"dataset_folder": "new_dataset",
}
for k, v in default_imdb_table.items():
if model.imodulondb_table[k] == v:
if k == "publication_link":
solution = "The publication name will not be a hyperlink."
else:
solution = 'The default, "{}", will be used.'.format(v)
table_issues = table_issues.append(
{
"Table": "iModulonDB",
"Missing Column": k,
"Solution": solution,
},
ignore_index=True,
)
# Check the gene table
gene_table_cols = {
"gene_name": "Locus tags (gene_table.index) will be used.",
"gene_product": "Locus tags (gene_table.index) will be used.",
"cog": "COG info will not display & the gene scatter plot will"
" not have color.",
"start": "The x axis of the scatter plot will be a numerical"
" value instead of a genome location.",
"operon": "Operon info will not display.",
"regulator": "Regulator info will not display. If you have a"
" TRN, add it to the model to auto-generate this column.",
}
gene_table_lower = {i.lower(): i for i in model.gene_table.columns}
for col in gene_table_cols.keys():
if not (col in gene_table_lower.keys()):
table_issues = table_issues.append(
{
"Table": "Gene",
"Missing Column": col,
"Solution": gene_table_cols[col],
},
ignore_index=True,
)
if (col in ["gene_name", "gene_product"]) & inplace:
model.gene_table[col] = model.gene_table.index
elif inplace:
model.gene_table = model.gene_table.rename(
{gene_table_lower[col]: col}, axis=1
)
# check for missing gene links
missing_g_links = []
for g in model.M.index:
if (
not (isinstance(model.gene_links[g], str))
or model.gene_links[g].strip() == ""
):
missing_g_links.append(g)
missing_g_links = pd.Series(missing_g_links, name="missing_gene_links")
# check for errors in the n_replicates column of the sample table
if inplace & ("n_replicates" in model.sample_table.columns):
try:
imdb_activity_bar_df(model, model.imodulon_table.index[0])
except ValueError:
logging.warning(
"Error detected in sample_table['n_replicates']."
" Deleting that column. It will be auto-regenerated."
" You can prevent this from happening in the future"
" using generate_n_replicates_column(model)"
)
model.sample_table = model.sample_table.drop("n_replicates", 1)
# check the sample table
sample_table_cols = {
"project": "This is a CRITICAL column defining the largest"
" grouping of samples. Vertical bars in the activity plot"
" will separate projects.",
"condition": "This is an CRITICAL column defining the smallest"
" grouping of samples. Biological replicates must have matching"
" projects and conditions, and they will appear as single bars"
" with averaged activities.",
"sample": "The sample_table.index will be used. Each entry must be"
' unique. Note that the preferred syntax is "project__condition__#."',
"n_replicates": "This column will be generated for you.",
"doi": "Clicking on activity plot bars will not link to relevant"
" papers for the samples.",
}
sample_table_lower = {i.lower(): i for i in model.sample_table.columns}
if model.sample_table.columns.str.lower().duplicated().any():
logging.warning(
"Critical issue: Duplicated column names"
" (case insensitive) in sample_table"
)
table_issues = table_issues.append(
{
"Table": "Sample",
"Missing Column": "N/A - Duplicated Columns Exist",
"Solution": "Column names (case insensitive) should not "
"be duplicated. Pay special attention the 'sample' column.",
},
ignore_index=True,
)
for col in sample_table_cols.keys():
if not (col in sample_table_lower.keys()):
if (col == "sample") & (model.sample_table.index.name == "sample"):
continue
if col in ["project", "condition"]:
logging.warning(
"Critical issue: No {} column in sample_table.".format(col)
)
table_issues = table_issues.append(
{
"Table": "Sample",
"Missing Column": col,
"Solution": sample_table_cols[col],
},
ignore_index=True,
)
if (col == "n_replicates") & inplace:
generate_n_replicates_column(model)
elif inplace:
model.sample_table = model.sample_table.rename(
{sample_table_lower[col]: col}, axis=1
)
# check for missing DOIs
if "doi" in sample_table_lower.keys():
if inplace:
doi_idx = "doi"
else:
doi_idx = sample_table_lower["doi"]
missing_DOIs = model.sample_table.index[
model.sample_table[doi_idx].isna()
].copy()
missing_DOIs.name = "missing_DOIs"
else:
missing_DOIs = model.sample_table.index.copy()
missing_DOIs.name = "missing_DOIs"
# check the iModulon table columns
try:
model.imodulon_table.index.astype(int)
im_idx = "int"
except TypeError:
im_idx = "str"
iM_table_cols = {
"name": "imodulon_table.index will be used.",
"regulator": "The regulator details will be left blank.",
"function": "The function will be blank in the dataset table and"
' "Uncharacterized" in the iModulon dashboard',
"category": 'The categories will be filled in as "Uncharacterized".',
"n_genes": "This column will be computed for you.",
"precision": "This column will be left blank.",
"recall": "This column will be left blank.",
"exp_var": "This column will be left blank.",
}
iM_table_lower = {i.lower(): i for i in model.imodulon_table.columns}
for col in iM_table_cols.keys():
if not (col in iM_table_lower.keys()):
table_issues = table_issues.append(
{
"Table": "iModulon",
"Missing Column": col,
"Solution": iM_table_cols[col],
},
ignore_index=True,
)
if inplace:
if col == "name":
if im_idx == "int":
model.imodulon_table["name"] = [
"iModulon {}".format(i) for i in model.imodulon_table.index
]
else:
model.imodulon_table["name"] = model.imodulon_table.index
elif col == "n_genes":
model.imodulon_table["n_genes"] = model.M_binarized.sum().astype(
int
)
else:
model.imodulon_table[col] = np.nan
elif inplace:
model.imodulon_table = model.imodulon_table.rename(
{iM_table_lower[col]: col}, axis=1
)
if inplace:
if im_idx == "str":
model.rename_imodulons(
dict(zip(model.imodulon_names, range(len(model.imodulon_names))))
)
for idx, tf in zip(model.imodulon_table.index, model.imodulon_table.regulator):
try:
model.imodulon_table.loc[idx, "regulator_readable"] = (
model.imodulon_table.regulator[idx]
.replace("/", " or ")
.replace("+", " and ")
)
except AttributeError:
model.imodulon_table.loc[
idx, "regulator_readable"
] = model.imodulon_table.regulator[idx]
# check the TRN
cols = ["in_trn", "has_link", "has_gene"]
tf_issues = pd.DataFrame(columns=cols)
if "regulator" in iM_table_lower.keys():
if inplace:
reg_idx = "regulator"
else:
reg_idx = iM_table_lower["regulator"]
for tf_string in model.imodulon_table[reg_idx]:
_, no_trn = parse_tf_string(model, tf_string)
_, no_link = tf_with_links(model, tf_string)
_, no_gene = get_tfs_to_scatter(model, tf_string, tfcomplex_to_gene)
tfs_to_add = set(no_trn + no_link + no_gene)
for tf in tfs_to_add:
row = dict(zip(cols, [True] * 3))
for col, tf_set in zip(cols, [no_trn, no_link, no_gene]):
if tf in tf_set:
row[col] = False
tf_issues.loc[tf] = row
return table_issues, tf_issues, missing_g_links, missing_DOIs
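# Illustrative usage sketch (not part of the original module). It assumes `model`
# is an already-loaded IcaData object; the FlhDC mapping is only an example value.
def _example_check_compatibility(model):
    table_issues, tf_issues, missing_g_links, missing_DOIs = imodulondb_compatibility(
        model, inplace=False, tfcomplex_to_gene={"FlhDC": "flhD"}
    )
    # Surface the blocking problems first; everything else only degrades the site
    critical = table_issues[table_issues["Solution"].str.contains("CRITICAL")]
    print(critical)
    print(tf_issues[~tf_issues["in_trn"]])
    return table_issues, tf_issues, missing_g_links, missing_DOIs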
def imodulondb_export(
model,
path=".",
cat_order=None,
tfcomplex_to_gene=None,
skip_iMs=False,
skip_genes=False,
):
"""
Generates the iModulonDB page for the data and exports to the path.
If certain columns are unavailable but can be filled in automatically,
they will be.
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object to export
path : str, optional
Path to iModulonDB main hosting folder (default = ".")
cat_order : list, optional
List of categories in the imodulon_table, ordered as you would
like them to appear in the dataset table (default = None)
tfcomplex_to_gene : dict, optional
dictionary pointing complex TRN entries
to matching gene names in the gene table
ex: {"FlhDC":"flhD"}
skip_iMs : bool, optional
If this is True, do not output iModulon files (to save time)
skip_genes : bool, optional
If this is True, do not output gene files (to save time)
Returns
-------
None: None
"""
if tfcomplex_to_gene is None:
tfcomplex_to_gene = {}
model1 = model.copy()
imodulondb_compatibility(model1, True, tfcomplex_to_gene=tfcomplex_to_gene)
print("Writing main site files...")
folder = imodulondb_main_site_files(model1, path, cat_order=cat_order)
print("Done writing main site files. Writing plot files...")
if not (skip_iMs and skip_genes):
print(
"Two progress bars will appear below. The second will take "
"significantly longer than the first."
)
if not (skip_iMs):
print("Writing iModulon page files (1/2)")
imdb_generate_im_files(model1, folder, "start", tfcomplex_to_gene)
if not (skip_genes):
print("Writing Gene page files (2/2)")
imdb_generate_gene_files(model1, folder)
print(
"Complete! (Organism = {}; Dataset = {})".format(
model1.imodulondb_table["organism_folder"],
model1.imodulondb_table["dataset_folder"],
)
)
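# Hedged example of a full export call (added for illustration). The category names
# and the output path are placeholders, not values from the original code.
def _example_export(model):
    imodulondb_export(
        model,
        path="./iModulonDB_site",
        cat_order=["Carbon Metabolism", "Stress Response", "Uncharacterized"],
        tfcomplex_to_gene={"FlhDC": "flhD"},
        skip_iMs=False,
        skip_genes=True,  # skip the slow per-gene pages while iterating on the export
    )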
###############################
# Major Outputs (Called Once) #
###############################
def imdb_dataset_table(model):
"""
Converts the model's imodulondb_table into dataset metadata
for the gray box on the left side of the dataset page
Parameters
----------
model: :class:`~pymodulon.core.IcaData`
An IcaData object
Returns
-------
res: ~pandas.Series
A series of formatted metadata
"""
res = pd.Series(dtype=str)
if model.imodulondb_table["organism"] == "New Organism":
org_short = ""
else:
org_parts = model.imodulondb_table["organism"].split(" ")
org_short = org_parts[0][0].upper() + ". " + org_parts[1].lower()
org_short = "<i>" + org_short + "</i>"
res["Title"] = org_short + " " + model.imodulondb_table["dataset"]
res["Organism"] = "<i>" + model.imodulondb_table["organism"] + "</i>"
res["Strain"] = model.imodulondb_table["strain"]
if model.imodulondb_table["publication_link"] == "":
res["Publication"] = model.imodulondb_table["publication_name"]
else:
pub_str = '<a href="' + model.imodulondb_table["publication_link"]
pub_str += '">' + model.imodulondb_table["publication_name"] + "</a>"
res["Publication"] = pub_str
res["Number of Samples"] = model.A.shape[1]
if ("project" in model.sample_table.columns) and (
"condition" in model.sample_table.columns
):
num_conds = len(model.sample_table.groupby(["condition", "project"]))
else:
num_conds = "Unknown"
res["Number of Unique Conditions"] = num_conds
res["Number of Genes"] = model.M.shape[0]
res["Number of iModulons"] = model.M.shape[1]
return res
def imdb_iM_table(imodulon_table, cat_order=None):
"""
Reformats the iModulon table into the layout expected by iModulonDB.
Parameters
----------
imodulon_table : ~pandas.DataFrame
Table formatted similar to IcaData.imodulon_table
cat_order : list, optional
List of categories in imodulon_table.category, ordered as desired
Returns
-------
im_table: ~pandas.DataFrame
New iModulon table with the columns expected by iModulonDB
"""
im_table = imodulon_table[
[
"name",
"regulator_readable",
"function",
"category",
"n_genes",
"exp_var",
"precision",
"recall",
]
].copy()
im_table.index.name = "k"
im_table.category = im_table.category.fillna("Uncharacterized")
if cat_order is not None:
cat_dict = {val: i for i, val in enumerate(cat_order)}
im_table.loc[:, "category_num"] = [
cat_dict[im_table.category[k]] for k in im_table.index
]
else:
try:
im_table.loc[:, "category_num"] = imodulon_table["new_idx"]
except KeyError:
im_table.loc[:, "category_num"] = im_table.index
return im_table
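# Minimal self-contained sketch of imdb_iM_table on made-up illustrative data
# (the iModulon names and numbers below are not from any real model).
def _example_imdb_iM_table():
    im_tab = pd.DataFrame(
        {
            "name": ["Fur-1", "ArcA"],
            "regulator_readable": ["fur", "arcA"],
            "function": ["iron homeostasis", "anaerobic respiration"],
            "category": ["Metal Homeostasis", None],
            "n_genes": [24, 31],
            "exp_var": [0.05, 0.03],
            "precision": [0.9, 0.8],
            "recall": [0.7, 0.6],
        }
    )
    # Missing categories become "Uncharacterized"; cat_order fixes their display order
    return imdb_iM_table(im_tab, cat_order=["Metal Homeostasis", "Uncharacterized"])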
def imdb_gene_presence(model):
"""
Generates the two versions of the gene presence file: one as a binary
gene-by-iModulon matrix, and one as a long-format table of (iModulon, gene) pairs
Parameters
----------
model: :class:`~pymodulon.core.IcaData`
An IcaData object
Returns
-------
mbin: ~pandas.DataFrame
Binarized M matrix
mbin_list: ~pandas.DataFrame
Table mapping genes to iModulons
"""
mbin = model.M_binarized.astype(bool)
mbin_list = pd.DataFrame(columns=["iModulon", "Gene"])
for k in mbin.columns:
for g in mbin.index[mbin[k]]:
mbin_list = mbin_list.append({"iModulon": k, "Gene": g}, ignore_index=True)
return mbin, mbin_list
def imodulondb_main_site_files(
model, path_prefix=".", rewrite_annotations=True, cat_order=None
):
"""
Generates all parts of the site that do not require large iteration loops
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
path_prefix : str, optional
Main folder for iModulonDB files (default = ".")
rewrite_annotations : bool, optional
Set to False if the gene_table and trn are unchanged (default = True)
cat_order : list, optional
list of categories in data.imodulon_table.category, ordered as you want
them to appear on the dataset page (default = None)
Returns
-------
main_folder: str
Dataset folder, for use as the path_prefix in imdb_generate_im_files()
"""
organism = model.imodulondb_table["organism_folder"]
dataset = model.imodulondb_table["dataset_folder"]
# create new folders
organism_folder = os.path.join(path_prefix, "organisms", organism)
if not (os.path.isdir(organism_folder)):
os.makedirs(organism_folder)
annot_folder = os.path.join(organism_folder, "annotation")
if not (os.path.isdir(annot_folder)):
rewrite_annotations = True
os.makedirs(annot_folder)
# save annotations
if rewrite_annotations:
# make the folder if necessary
gene_folder = os.path.join(annot_folder, "gene_files")
if not (os.path.isdir(gene_folder)):
os.makedirs(gene_folder)
# add files to the folder
model.gene_table.to_csv(os.path.join(gene_folder, "gene_info.csv"))
try:
model.trn.to_csv(os.path.join(gene_folder, "trn.csv"))
except FileNotFoundError:
pass
# zip the folder
old_cwd = os.getcwd()
os.chdir(gene_folder)
with ZipFile("../gene_files.zip", "w") as z:
z.write("gene_info.csv")
z.write("trn.csv")
os.chdir(old_cwd)
main_folder = os.path.join(organism_folder, dataset)
if not (os.path.isdir(main_folder)):
os.makedirs(main_folder)
# save the metadata files in the main folder
dataset_meta = imdb_dataset_table(model)
dataset_meta.to_csv(os.path.join(main_folder, "dataset_meta.csv"))
# num_ims - used so that the 'next iModulon' button doesn't overflow
file = open(main_folder + "/num_ims.txt", "w")
file.write(str(model.M.shape[1]))
file.close()
# save the dataset files in the data folder
data_folder = os.path.join(main_folder, "data_files")
if not (os.path.isdir(data_folder)):
os.makedirs(data_folder)
model.X.to_csv(os.path.join(data_folder, "log_tpm.csv"))
model.A.to_csv(os.path.join(data_folder, "A.csv"))
model.M.to_csv(os.path.join(data_folder, "M.csv"))
im_table = imdb_iM_table(model.imodulon_table, cat_order)
im_table.to_csv(os.path.join(data_folder, "iM_table.csv"))
model.sample_table.to_csv(os.path.join(data_folder, "sample_table.csv"))
mbin, mbin_list = imdb_gene_presence(model)
mbin.to_csv(os.path.join(data_folder, "gene_presence_matrix.csv"))
mbin_list.to_csv(os.path.join(data_folder, "gene_presence_list.csv"))
pd.Series(model.thresholds).to_csv(os.path.join(data_folder, "M_thresholds.csv"))
# zip the data folder
old_cwd = os.getcwd()
os.chdir(data_folder)
with ZipFile("../data_files.zip", "w") as z:
z.write("log_tpm.csv")
z.write("A.csv")
z.write("M.csv")
z.write("iM_table.csv")
z.write("sample_table.csv")
z.write("gene_presence_list.csv")
z.write("gene_presence_matrix.csv")
z.write("M_thresholds.csv")
os.chdir(old_cwd)
# make iModulons searchable
enrich_df = model.imodulon_table.copy()
enrich_df["component"] = enrich_df.index
enrich_df = enrich_df[["component", "name", "regulator", "function"]]
enrich_df = enrich_df.rename({"function": "Function"}, axis=1)
try:
enrich_df = enrich_df.sort_values(by="name").fillna(value="N/A")
except TypeError:
enrich_df["name"] = enrich_df["name"].astype(str)
enrich_df = enrich_df.sort_values(by="name").fillna(value="N/A")
if not (os.path.isdir(main_folder + "/iModulon_files")):
os.makedirs(main_folder + "/iModulon_files")
enrich_df.to_json(main_folder + "/iModulon_files/im_list.json", orient="records")
# make genes searchable
gene_df = model.gene_table.copy()
gene_df = gene_df[gene_df.index.isin(model.X.index)]
gene_df["gene_id"] = gene_df.index
gene_df = gene_df[["gene_name", "gene_id", "gene_product"]]
gene_df = gene_df.sort_values(by="gene_name").fillna(value="not available")
if not (os.path.isdir(main_folder + "/gene_page_files")):
os.makedirs(main_folder + "/gene_page_files")
gene_df.to_json(main_folder + "/gene_page_files/gene_list.json", orient="records")
# make the html
html = '<div class="panel">\n'
html += ' <div class="panel-header">\n'
html += ' <h2 class="mb-0">\n'
html += ' <button class="btn btn-link collapsed organism" type="button"'
html += ' data-toggle="collapse" data-target="#new_org" aria-expanded="false"'
html += ' aria-controls="new_org">\n <i>'
html += model.imodulondb_table["organism"]
html += "</i>\n </button>\n </h2>\n </div>\n"
html += ' <div id="new_org" class="collapse" aria-labelledby="headingThree"'
html += ' data-parent="#organismAccordion">\n'
html += ' <div class="panel-body">\n'
html += ' <ul class="nav navbar-dark flex-column">\n'
html += ' <li class="nav-item dataset">\n'
html += ' <a class="nav-link active" href="dataset.html?organism='
html += organism
html += "&dataset="
html += dataset
html += '"><i class="fas fa-angle-right pr-2"></i>'
html += model.imodulondb_table["dataset"]
html += "\n </a>\n </li>\n"
html += " </ul>\n </div>\n </div>\n</div>"
file = open(main_folder + "/html_for_splash.html", "w")
file.write(html)
file.close()
return main_folder
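# Illustrative call sequence (assumes `model` has already been passed through the
# compatibility check with inplace=True); the returned folder is the path_prefix
# expected by the per-iModulon and per-gene writers.
def _example_write_main_site(model):
    dataset_folder = imodulondb_main_site_files(
        model, path_prefix=".", rewrite_annotations=True, cat_order=None
    )
    imdb_generate_im_files(model, dataset_folder, gene_scatter_x="start")
    return dataset_folder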
def imdb_generate_im_files(
model, path_prefix=".", gene_scatter_x="start", tfcomplex_to_gene=None
):
"""
Generates all files for all iModulons in data
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
path_prefix : str, optional
Dataset folder in which to store the files (default = ".")
gene_scatter_x : str
Column from the gene table that specifies what to use on the
X-axis of the gene scatter plot (default = "start")
tfcomplex_to_gene : dict, optional
dictionary pointing complex TRN entries
to matching gene names in the gene table
ex: {"FlhDC":"flhD"}
"""
if tfcomplex_to_gene is None:
tfcomplex_to_gene = {}
for k in tqdm(model.imodulon_table.index):
make_im_directory(model, k, path_prefix, gene_scatter_x, tfcomplex_to_gene)
def imdb_generate_gene_files(model, path_prefix="."):
"""
Generates all files for all iModulons in IcaData object
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
path_prefix : str, optional
Dataset folder in which to store the files (default = ".")
Returns
-------
None
"""
for g in tqdm(model.M.index):
make_gene_directory(model, g, path_prefix)
###################################################
# iModulon-Related Outputs (and Helper Functions) #
###################################################
# Gene Table
def parse_tf_string(model, tf_str, verbose=False):
"""
Returns a list of relevant tfs from a string. Will ignore TFs not in the
trn file.
iModulonDB helper function.
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
tf_str : str
String of tfs joined by '+' and '/' operators
verbose : bool, optional
Whether or not to print outputs
Returns
-------
tfs: list
List of relevant TFs found in the TRN
bad_tfs: list
List of TFs in the string that are not in the TRN
"""
if not (type(tf_str) == str):
return [], []
if tf_str == "":
return [], []
tf_str = tf_str.replace("[", "").replace("]", "")
tfs = re.split("[+/]", tf_str)
# Check if there is an issue, just remove the issues for now.
bad_tfs = []
for tf in tfs:
tf = tf.strip()
if tf not in model.trn.regulator.unique():
if verbose:
print("Regulator not in TRN:", tf)
print(
"To remedy this, add rows to the TRN for each gene associated "
"with this regulator. Otherwise, it will be ignored in the gene"
"tables and histograms."
)
bad_tfs.append(tf)
tfs = [t.strip() for t in list(set(tfs) - set(bad_tfs))]
bad_tfs = list(set(bad_tfs))
return tfs, bad_tfs
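# Quick sketch of the expected parse behaviour (assumes `model.trn` contains ArcA
# and Fur but not the hypothetical regulator "NotATF").
def _example_parse_tf_string(model):
    tfs, bad_tfs = parse_tf_string(model, "ArcA+Fur/NotATF", verbose=True)
    # tfs -> ["ArcA", "Fur"] (order not guaranteed); bad_tfs -> ["NotATF"]
    return tfs, bad_tfs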
def imdb_gene_table_df(model, k):
"""
Creates the gene table dataframe for iModulonDB
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
k : int or str
iModulon name
Returns
-------
res: ~pandas.DataFrame
DataFrame of the gene table that is compatible with iModulonDB
"""
# get TFs and large table
row = model.imodulon_table.loc[k]
tfs, _ = parse_tf_string(model, row.regulator)
res = model.view_imodulon(k)
# sort
columns = []
for c in [
"gene_weight",
"gene_name",
"old_locus_tag",
"gene_product",
"cog",
"operon",
"regulator",
]:
if c in res.columns:
columns.append(c)
res = res[columns]
res = res.sort_values("gene_weight", ascending=False)
# add TFs
for tf in tfs:
reg_genes = model.trn.gene_id[model.trn.regulator == tf].values
res[tf] = [i in reg_genes for i in res.index]
# add links
res["link"] = [model.gene_links[g] for g in res.index]
# clean up
res.index.name = "locus"
return res
# Gene Histogram
def _component_DF(model, k, tfs=None):
"""
Helper function for imdb_gene_hist_df
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
k : int or str
iModulon name
tfs : list
List of TFs (default = None)
Returns
-------
gene_table: ~pandas.DataFrame
Gene table for the iModulon
"""
df = pd.DataFrame(model.M[k].sort_values())
df.columns = ["gene_weight"]
if "gene_product" in model.gene_table.columns:
df["gene_product"] = model.gene_table["gene_product"]
if "gene_name" in model.gene_table.columns:
df["gene_name"] = model.gene_table["gene_name"]
if "operon" in model.gene_table.columns:
df["operon"] = model.gene_table["operon"]
if "length" in model.gene_table.columns:
df["length"] = model.gene_table.length
if "regulator" in model.gene_table.columns:
df["regulator"] = model.gene_table.regulator.fillna("")
if tfs is not None:
for tf in tfs:
df[tf] = [tf in regs.split(",") for regs in df["regulator"]]
return df.sort_values("gene_weight")
def _tf_combo_string(row):
"""
Creates a formatted string for the histogram legends. Helper function for
imdb_gene_hist_df.
Parameters
----------
row : ~pandas.Series
Boolean series indexed by TFs for a given gene
Returns
-------
str
A string formatted for display (i.e. "Regulated by ...")
"""
if row.sum() == 0:
return "unreg"
if row.sum() == 1:
return row.index[row][0]
if row.sum() == 2:
return " and ".join(row.index[row])
else:
return ", ".join(row.index[row][:-1]) + ", and " + row.index[row][-1]
def _sort_tf_strings(tfs, unique_elts):
"""
Sorts TF strings for the legend of the histogram. Helper function for
imdb_gene_hist_df.
Parameters
----------
tfs : list[str]
Sequence of TFs in the desired order
unique_elts : list[str]
All combination strings made by _tf_combo_string
Returns
-------
list[str]
A sorted list of combination strings that have a consistent ordering
"""
# unreg always goes first
unique_elts.remove("unreg")
sorted_elts = ["unreg"]
# then the individual TFs
for tf in tfs:
if tf in unique_elts:
sorted_elts.append(tf)
unique_elts.remove(tf)
# then pairs
pairs = [i for i in unique_elts if "," not in i]
for i in tfs:
for j in tfs:
name = i + " and " + j
if name in pairs:
sorted_elts.append(name)
unique_elts.remove(name)
# then longer combos, which won't be sorted for now
return sorted_elts + unique_elts
def imdb_gene_hist_df(model, k, bins=20, tol=0.001):
"""
Creates the gene histogram for an iModulon
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
k : int or str
iModulon name
bins : int
Number of bins in the histogram (default = 20)
tol : float
Distance to threshold for deciding if a bar is in the iModulon
(default = .001)
Returns
-------
gene_hist_table: ~pandas.DataFrame
A dataframe for producing the histogram that is compatible with
iModulonDB
"""
# get TFs
row = model.imodulon_table.loc[k]
if not (type(row.regulator) == str):
tfs = []
else:
tfs, _ = parse_tf_string(model, row.regulator)
tfs = list(set(tfs))
# get genes
DF_gene = _component_DF(model, k, tfs)
# add a tf_combo column
if len(tfs) == 0:
DF_gene["tf_combos"] = ["unreg"] * DF_gene.shape[0]
else:
tf_bools = DF_gene[tfs]
DF_gene["tf_combos"] = [
_tf_combo_string(tf_bools.loc[g]) for g in tf_bools.index
]
# get the list of tf combos in the correct order
tf_combo_order = _sort_tf_strings(tfs, list(DF_gene.tf_combos.unique()))
# compute bins
xmin = min(min(DF_gene.gene_weight), -model.thresholds[k])
xmax = max(max(DF_gene.gene_weight), model.thresholds[k])
width = (
2
* model.thresholds[k]
/ max((np.floor(2 * model.thresholds[k] * bins / (xmax - xmin) - 1)), 1)
)
xmin = -model.thresholds[k] - width * np.ceil((-model.thresholds[k] - xmin) / width)
xmax = xmin + width * bins
# column headers: bin middles
columns = np.arange(xmin + width / 2, xmax + width / 2, width)[:bins]
index = ["thresh"] + tf_combo_order + [i + "_genes" for i in tf_combo_order]
res = pd.DataFrame(index=index, columns=columns)
# row 0: threshold indices and number of unique tf combos
thresh1 = -model.thresholds[k]
thresh2 = model.thresholds[k]
num_combos = len(tf_combo_order)
res.loc["thresh"] = [thresh1, thresh2, num_combos] + [np.nan] * (len(columns) - 3)
# next set of rows: heights of bars
for r in tf_combo_order:
res.loc[r] = np.histogram(
DF_gene.gene_weight[DF_gene.tf_combos == r], bins, (xmin, xmax)
)[0]
# last set of rows: gene names
for b_mid in columns:
# get the bin bounds
b_lower = b_mid - width / 2
b_upper = b_lower + width
for r in tf_combo_order:
# get the genes for this regulator and bin
genes = DF_gene.index[
(DF_gene.tf_combos == r)
& (DF_gene.gene_weight < b_upper)
& (DF_gene.gene_weight > b_lower)
]
# use the gene names, and get them with num2name (more robust)
genes = [model.num2name(g) for g in genes]
res.loc[r, b_mid] = len(genes)
gene_list = np.array2string(np.array(genes), separator=" ")
# don't list unregulated genes unless they are in the i-modulon
if r == "unreg":
if (b_lower + tol >= model.thresholds[k]) or (
b_upper - tol <= -model.thresholds[k]
):
res.loc[r + "_genes", b_mid] = gene_list
else:
res.loc[r + "_genes", b_mid] = "[]"
else:
res.loc[r + "_genes", b_mid] = gene_list
return res
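# Illustrative call (assumes `model` and an iModulon name `k` exist). The result is
# row-oriented: a "thresh" row with the two thresholds, one row of bar heights per
# TF combination, and one "<combo>_genes" row holding the gene names in each bin.
def _example_gene_hist(model, k):
    hist_df = imdb_gene_hist_df(model, k, bins=20, tol=0.001)
    print(hist_df.loc["thresh"].dropna())  # [-threshold, +threshold, n_combos]
    return hist_df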
# Gene Scatter Plot
def _gene_color_dict(model):
"""
Helper function to match genes to colors based on COG. Used by
imdb_gene_scatter_df.
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
Returns
-------
dict
Dictionary associating gene names to colors
"""
try:
gene_cogs = model.gene_table.cog.to_dict()
except AttributeError:
return {k: "dodgerblue" for k in model.gene_table.index}
try:
return {k: model.cog_colors[v] for k, v in gene_cogs.items()}
except (KeyError, AttributeError):
# previously, this would call the setter using:
# data.cog_colors = None
cogs = sorted(model.gene_table.cog.unique())
model.cog_colors = dict(
zip(
cogs,
[
"red",
"pink",
"y",
"orchid",
"mediumvioletred",
"green",
"lightgray",
"lightgreen",
"slategray",
"blue",
"saddlebrown",
"turquoise",
"lightskyblue",
"c",
"skyblue",
"lightblue",
"fuchsia",
"dodgerblue",
"lime",
"sandybrown",
"black",
"goldenrod",
"chocolate",
"orange",
],
)
)
return {k: model.cog_colors[v] for k, v in gene_cogs.items()}
def imdb_gene_scatter_df(model, k, gene_scatter_x="start"):
"""
Generates a dataframe for the gene scatter plot in iModulonDB
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
k : int or str
iModulon name
gene_scatter_x : str
Determines x-axis of the scatterplot
Returns
-------
res: ~pandas.DataFrame
A dataframe for producing the scatterplot
"""
columns = ["name", "x", "y", "cog", "color", "link"]
res = pd.DataFrame(columns=columns, index=model.M.index)
res.index.name = "locus"
cutoff = model.thresholds[k]
# x&y scatterplot points - do alternatives later
if gene_scatter_x == "start":
try:
res.x = model.gene_table.loc[res.index, "start"]
except KeyError:
gene_scatter_x = "gene number"
res.x = range(len(res.index))
else:
raise ValueError("Only 'start' is supported as a gene_scatter_x input.")
# res.x = data.X[base_conds].mean(axis=1)
res.y = model.M[k]
# add other data
res.name = [model.num2name(i) for i in res.index]
try:
res.cog = model.gene_table.cog[res.index]
except AttributeError:
res.cog = "Unknown"
gene_colors = _gene_color_dict(model)
res.color = [to_hex(gene_colors[gene]) for gene in res.index]
# if the gene is in the iModulon, it is clickable
in_im = res.index[res.y.abs() > cutoff]
for g in in_im:
res.loc[g, "link"] = model.gene_links[g]
# add a row to store the threshold
cutoff_row = pd.DataFrame(
[gene_scatter_x, cutoff] + [np.nan] * 4, columns=["meta"], index=columns
).T
res = pd.concat([cutoff_row, res])
return res
# Activity Bar Graph
def generate_n_replicates_column(model):
"""
Generates the "n_replicates" column of the sample_table for iModulonDB.
Parameters
----------
model: :class:`~pymodulon.core.IcaData`
IcaData object. Will overwrite the existing column if it exists.
Returns
-------
None: None
"""
try:
for name, group in model.sample_table.groupby(["project", "condition"]):
model.sample_table.loc[group.index, "n_replicates"] = group.shape[0]
except KeyError:
logging.warning(
"Unable to write n_replicates column. Add"
" project & condition columns (required)."
)
def imdb_activity_bar_df(model, k):
"""
Generates a dataframe for the activity bar graph of iModulon k
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
k : int or str
iModulon name
Returns
-------
res: ~pandas.DataFrame
A dataframe for producing the activity bar graph for iModulonDB
"""
samp_table = model.sample_table.reset_index(drop=True)
# get the row of A
A_k = model.A.loc[k]
A_k = A_k.rename(dict(zip(A_k.index, samp_table.index)))
# initialize the dataframe
max_replicates = int(samp_table["n_replicates"].max())
columns = ["A_avg", "A_std", "n"] + list(
chain(
*[
["rep{}_idx".format(i), "rep{}_A".format(i)]
for i in range(1, max_replicates + 1)
]
)
)
res = pd.DataFrame(columns=columns)
# iterate through conditions and fill in rows
for cond, group in samp_table.groupby(["project", "condition"], sort=False):
# get condition name and A values
cond_name = cond[0] + "__" + cond[1] # project__cond
vals = A_k[group.index]
# compute statistics
new_row = [vals.mean(), vals.std(), len(vals)]
# fill in individual samples (indices and values)
for idx in group.index:
new_row += [idx, vals[idx]]
new_row += [np.nan] * ((max_replicates - len(vals)) * 2)
res.loc[cond_name] = new_row
# clean up
res.index.name = "condition"
res = res.reset_index()
return res
# Regulon Venn Diagram
def _parse_regulon_string(model, s):
"""
The Bacillus microarray dataset uses [] to create unusually complicated
TF strings. This function parses those, as a helper to _get_reg_genes for
imdb_regulon_venn_df.
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
s : str
TF string
Returns
-------
res: set
Set of genes regulated by this string
"""
res = set()
if not (isinstance(s, str)):
return res
if "/" in s:
union = s.split("] / [")
union[0] = union[0][1:]
union[-1] = union[-1][:-1]
else:
union = [s]
for r in union:
if "+" in r:
intersection = r.split(" + ")
genes = set(model.trn.gene_id[model.trn.regulator == intersection[0]])
for i in intersection[1:]:
genes = genes.intersection(
set(model.trn.gene_id[model.trn.regulator == i])
)
else:
genes = set(model.trn.gene_id[model.trn.regulator == r])
res = res.union(genes)
return res
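# Worked example of the bracketed regulon syntax (assumes SigB, TnrA and GlnR are
# regulators in `model.trn`): "/" takes a union of the bracketed terms and "+"
# intersects the regulons inside a term.
def _example_parse_regulon_string(model):
    # genes regulated by SigB, plus genes regulated by both TnrA and GlnR
    return _parse_regulon_string(model, "[SigB] / [TnrA + GlnR]")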
def _get_reg_genes(model, tf):
"""
Finds the set of genes regulated by the boolean combination of regulators
in a TF string
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
tf : str
string of TFs separated by +, /, and/or []
Returns
-------
reg_genes: set[str]
Set of regulated genes
"""
# the Bacillus tf strings use '[]' to make complicated boolean combinations
if "[" in tf:
reg_genes = _parse_regulon_string(model, tf)
# other datasets can use this simpler code
else:
tf = tf.strip()
if "+" in tf:
reg_list = []
for tfx in tf.split("+"):
tfx = tfx.strip()
reg_list.append(
set(model.trn[model.trn.regulator == tfx].gene_id.unique())
)
reg_genes = set.intersection(*reg_list)
elif "/" in tf:
reg_genes = set(
model.trn[
model.trn.regulator.isin([t.strip() for t in tf.split("/")])
].gene_id.unique()
)
else:
reg_genes = set(model.trn[model.trn.regulator == tf].gene_id.unique())
# return result
return reg_genes
def imdb_regulon_venn_df(model, k):
"""
Generates a dataframe for the regulon venn diagram of iModulon k. Returns
None if there is no diagram to draw
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
k : int or str
iModulon name
Returns
-------
res: ~pandas.DataFrame
A DataFrame for producing the venn diagram in iModulonDB
"""
row = model.imodulon_table.loc[k]
tf = row["regulator"]
if not (type(tf) == str):
return None
if tf.strip() == "":
return None
# Take care of and/or enrichments
reg_genes = _get_reg_genes(model, tf)
# Get component genes
comp_genes = set(model.view_imodulon(k).index)
both_genes = set(reg_genes & comp_genes)
# Get gene counts
reg_gene_count = len(reg_genes)
comp_gene_count = len(comp_genes)
both_gene_count = len(both_genes)
# Add adjustments for venn plotting (add '2' for alternates)
reg_gene_count2 = 0
comp_gene_count2 = 0
both_gene_count2 = 0
if reg_genes == comp_genes:
reg_gene_count = 0
comp_gene_count = 0
both_gene_count = 0
reg_gene_count2 = 0
comp_gene_count2 = 0
both_gene_count2 = len(reg_genes)
elif all(item in comp_genes for item in reg_genes):
reg_gene_count = 0
both_gene_count = 0
reg_gene_count2 = len(reg_genes)
comp_gene_count2 = 0
both_gene_count2 = 0
elif all(item in reg_genes for item in comp_genes):
comp_gene_count = 0
both_gene_count = 0
reg_gene_count2 = 0
comp_gene_count2 = len(comp_genes)
both_gene_count2 = 0
res = pd.DataFrame(
[
tf,
reg_gene_count,
comp_gene_count,
both_gene_count,
reg_gene_count2,
comp_gene_count2,
both_gene_count2,
],
columns=["Value"],
index=[
"TF",
"reg_genes",
"comp_genes",
"both_genes",
"reg_genes2",
"comp_genes2",
"both_genes2",
],
)
# gene lists
just_reg = reg_genes - both_genes
just_comp = comp_genes - both_genes
for i, l in zip(
["reg_genes", "comp_genes", "both_genes"], [just_reg, just_comp, both_genes]
):
gene_list = np.array([model.num2name(g) for g in l])
gene_list = np.array2string(gene_list, separator=" ")
res.loc[i, "list"] = gene_list
return res
# Regulon Scatter Plot
def get_tfs_to_scatter(model, tf_string, tfcomplex_to_genename=None, verbose=False):
"""
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
tf_string : str or ~numpy.nan
String of TFs, or np.nan
tfcomplex_to_genename : dict, optional
dictionary pointing complex TRN entries
to matching gene names in the gene table
ex: {"FlhDC":"flhD"}
verbose : bool
Show verbose output (default: False)
Returns
-------
res: list
List of TF gene names that have an expression profile in the model
bad_res: list
List of TFs that could not be matched to an expression profile
"""
# hard-coded TF names
# should just modify TRN/gene info so everything matches but ok
if tfcomplex_to_genename is None:
tfcomplex_to_genename = {}
rename_tfs = {
"csqR": "yihW",
"hprR": "yedW",
"thi-box": "Thi-box",
"FlhDC": "flhD",
"RcsAB": "rcsB",
"ntrC": "glnG",
"gutR": "srlR",
"IHF": "ihfB",
"H-NS": "hns",
"GadE-RcsB": "gadE",
}
for k, v in tfcomplex_to_genename.items():
rename_tfs[k] = v
res = []
bad_res = []
if type(tf_string) == str:
tf_string = tf_string.replace("[", "").replace("]", "")
tfs = re.split("[+/]", tf_string)
for tf in tfs:
tf = tf.strip()
if tf in rename_tfs.keys():
tf = rename_tfs[tf]
try:
b_num = model.name2num(tf)
if b_num in model.X.index:
res.append(tf)
except ValueError:
bad_res.append(tf)
if verbose:
print("TF has no associated expression profile:", tf)
print("If {} is not a gene, this behavior is expected.".format(tf))
print(
"If it is a gene, use consistent naming"
" between the TRN and gene_table."
)
res = list(set(res)) # remove duplicates
bad_res = list(set(bad_res))
return res, bad_res
def imdb_regulon_scatter_df(model, k, tfcomplex_to_genename=None):
"""
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
k : int or str
iModulon name
tfcomplex_to_genename : dict, optional
dictionary pointing complex TRN entries
to matching gene names in the gene table
ex: {"FlhDC":"flhD"}
Returns
-------
res: ~pandas.DataFrame
A dataframe for producing the regulon scatter plots in iModulonDB
"""
if tfcomplex_to_genename is None:
tfcomplex_to_genename = {}
row = model.imodulon_table.loc[k]
tfs, _ = get_tfs_to_scatter(model, row.regulator, tfcomplex_to_genename)
if len(tfs) == 0:
return None
# coordinates for points
coord = pd.DataFrame(columns=["A"] + tfs, index=model.A.columns)
coord["A"] = model.A.loc[k]
# params for fit line
param_df = pd.DataFrame(
columns=["A"] + tfs, index=["R2", "xmin", "xmid", "xmax", "ystart", "yend"]
)
# fill in dfs
for tf in tfs:
# coordinates
coord[tf] = model.X.loc[model.name2num(tf)]
xlim = np.array([coord[tf].min(), coord[tf].max()])
# fit line
params, r2 = _get_fit(coord[tf], coord["A"])
if len(params) == 2: # unbroken
y = _solid_line(xlim, *params)
out = [xlim[0], np.nan, xlim[1], y[0], y[1]]
else: # broken
xvals = np.array([xlim[0], params[2], xlim[1]])
y = _broken_line(xvals, *params)
out = [xlim[0], params[2], xlim[1], y[0], y[2]]
param_df[tf] = [r2] + out
res = pd.concat([param_df, coord], axis=0)
return res
# Copyright (C) 2021 ServiceNow, Inc.
""" Functionality for training all keyword prediction downstream models
and building the downstream task dataset
"""
import pandas as pd
from typing import Union, List, Callable
import tqdm
import datetime
import random
import pathlib
import numpy as np
import subprocess
import sys
import joblib
import os
import re
import json
import wandb
import sklearn
import nrcan_p2.data_processing.pipeline_utilities as pu
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.model_selection import cross_validate
from sklearn.model_selection import GroupShuffleSplit, GridSearchCV
from sklearn.model_selection import train_test_split
from filelock import FileLock
from imblearn.over_sampling import RandomOverSampler
from nrcan_p2.data_processing.vectorization import convert_dfcol_text_to_vector
from sklearn.metrics import (
accuracy_score,
precision_recall_fscore_support,
multilabel_confusion_matrix,
confusion_matrix
)
from keras import backend as K
from tensorflow import keras
import tensorflow as tf
from tensorflow.keras import layers
import pandas as pd
import pathlib
from gensim.test.utils import datapath, get_tmpfile
from gensim.models import KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import Callback
#from sklearn.multioutput import MultiOutputClassifier
from nrcan_p2.evaluation.sklearn_multioutput import MultiOutputClassifier
from nrcan_p2.evaluation.bert_keyword_prediction import main as run_bert_multiclass_script
class SupressSettingWithCopyWarning:
""" To be used in with blocks to suppress SettingWithCopyWarning """
def __enter__(self):
pd.options.mode.chained_assignment = None
def __exit__(self, *args):
pd.options.mode.chained_assignment = 'warn'
def produce_text_column(
df:pd.DataFrame,
text_column:str,
text_column_is_list:bool,
text_col_processing:str,
pre_pipeline:str,
post_pipeline:str,
):
""" Given a raw metadata df, produce the "text" column,
adding "keyword_text" column to the df.
This assumes that the input text column is either a str
or a list of str that should be somehow converted to a single str
:returns: input df, with an extra column 'keyword_text' with the
processed text from column 'text_column
"""
output_col = 'keyword_text'
# get the text
df[output_col] = df[text_column]
# the text column might be a list,
# convert to a string as necesssary, using the
# text_col_processing method
if text_column_is_list:
if text_col_processing == 'join':
df[output_col] = df[output_col].str.join(' ')
elif text_col_processing == 'first':
df[output_col] = df[output_col].str[0]
else:
raise ValueError('Unknown text_col_processing')
if pre_pipeline is not None:
dff = pu.run_pipeline(df[[output_col]],
col=output_col,
next_col=output_col,
preprocessing_pipe=pre_pipeline)
else:
dff = df
if post_pipeline is not None:
dff[output_col] = dff.apply(
lambda row: pu.run_pipeline(
row.to_frame().transpose(),
col=output_col,
next_col=output_col,
postmerge_preprocessing_pipe=post_pipeline),
axis=1
)
df[output_col] = dff[output_col]
return df
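# Self-contained sketch of produce_text_column with list-valued text and no NRCan
# pipelines (the column name "desc" is made up for the example).
def _example_produce_text_column():
    df = pd.DataFrame({"desc": [["first box", "second box"], ["only box"]]})
    out = produce_text_column(
        df,
        text_column="desc",
        text_column_is_list=True,
        text_col_processing="join",
        pre_pipeline=None,
        post_pipeline=None,
    )
    return out["keyword_text"]  # -> "first box second box", "only box"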
def produce_cat_column(
df,
keyword_col,
pre_pipeline,
post_pipeline
):
""" Given a raw metadata df, produce the "category" column,
adding "keyword_cat" column to the df.
This assumes that the input category column is a list of
strings.
:returns: input df, with an extra column 'keyword_cat' with the
processed text from column indicated by 'keyword_col'
"""
output_col = 'keyword_cat'
df = df.copy()
df[output_col] = df[keyword_col]
if pre_pipeline is None and post_pipeline is None:
return df
# assume it's a list of keywords
df_kw = df.explode(column=output_col)
if pre_pipeline is not None:
df_kw = pu.run_pipeline(df_kw[[output_col]],
col=output_col,
next_col=output_col,
preprocessing_pipe=pre_pipeline)
if post_pipeline is not None:
df_kw[output_col] = df_kw.apply(
lambda row: pu.run_pipeline(
row.to_frame().transpose(),
col=output_col,
next_col=output_col,
postmerge_preprocessing_pipe=post_pipeline),
axis=1
)
df_kw = df_kw.reset_index().groupby(['index']).agg(lambda x: list(x))
# the previous step inserts nan values into what should be empty lists. remove them
df_kw[output_col] = df_kw[output_col].apply(lambda x: [xx for xx in x if xx is not None and isinstance(xx, str)])
df[output_col] = df_kw[output_col]
return df
def produce_keyword_classification_dataset_from_df(
df_parquet:Union[str,pd.DataFrame],
pre_pipeline:str,
post_pipeline:str,
cat_pre_pipeline:str,
cat_post_pipeline:str,
text_column:str,
text_column_is_list:bool,
text_col_processing:str,
keyword_col:str,
n_categories:int,
task:str,
n_negative_sample:int,
do_not_drop:bool=False,
):
""" Produce a keyword classification dataset
:param df_parquet: the raw metadata file for produce a keyword dataset
as either a df or the name of a parquet file to load
:param pre_pipeline: the name of an NRCan "pre" pipeline to be used
to process the text column. A pre pipeline is one that operates
at the textbox level.
:param post_pipeline: the name of an NRCan "post" pipeline to be used
to process the text column after pre_pipeline. A post pipeline
is one that operates on the textboxes once merged, but will be
applied here per example in the input df
:param cat_pre_pipeline: the name of an NRCan "pre" pipeline to be used
to process the category column
:param cat_post_pipeline: the name of an NRCan "post" pipeline to be used
to process the category column
:param text_column: the name of the text column in the input
:param text_column_is_list: whether or not the text column is a str
or a list of str
:param keyword_col: the name of the keyword column in the input
:param n_categories: the top-n categories to maintain
:param task: the type of dataset to produce, MULTICLASS or PAIRING
:param n_negative_samples: the number of negative samples for the PAIRING
task, None to get all negative samples
:param do_not_drop: whether to not drop rows with null values
:returns: df with the columns
MULTICLASS: 'keyword_text', 'cat_X' ... for each X in the output categories
keyword_text is the text input
cat_X is 0/1 indicating the presence of a category
PAIRING: 'keyword_text', 'cat', 'label'
keyword_text is the text input
cat is the category name
label is 0/1 to indicate whether the cat matches the keyword_text
"""
if type(df_parquet) == str:
df = pd.read_parquet(df_parquet)
else:
df = df_parquet
with SupressSettingWithCopyWarning():
df = produce_text_column(
df,
text_column=text_column,
text_column_is_list=text_column_is_list,
text_col_processing=text_col_processing,
pre_pipeline=pre_pipeline,
post_pipeline=post_pipeline,
)
# get the subject
# drop None values in the keywords
with SupressSettingWithCopyWarning():
if task == 'MULTICLASS':
df['keyword_cat'] = df[keyword_col].apply(lambda x: [xx.strip() for xx in x if xx is not None] if x is not None else [])
else:
df['keyword_cat'] = df[keyword_col].apply(lambda x: [xx.strip() for xx in x if xx is not None] if x is not None else x)
df = produce_cat_column(
df,
keyword_col='keyword_cat',
pre_pipeline=cat_pre_pipeline,
post_pipeline=cat_post_pipeline,
)
vc = df['keyword_cat'].explode().value_counts()
if n_categories is None:
vc_subset = vc.index
else:
vc_subset = vc.index[0:n_categories]
if task == "MULTICLASS":
assert df.index.is_unique
mlb = MultiLabelBinarizer()
# multiclass classifier, produce one column per label
if not do_not_drop:
print(df.shape)
df = df.dropna(subset=['keyword_cat'])
print(df.shape)
t = mlb.fit_transform(df.keyword_cat)
Y = pd.DataFrame(t,columns=['cat_' + c for c in mlb.classes_])
Y = Y[['cat_' + c for c in vc_subset]]
df_ret = pd.merge(df, Y, right_index=True, left_index=True)
elif task == "PAIRING":
if not do_not_drop:
print('Dropping...')
print(df.shape)
df = df.dropna(subset=['keyword_cat'])
print(df.shape)
full_vc_set = set(vc_subset)
if n_negative_sample is not None:
def get_sampled_categories(x):
# Sample the desired number of negative examples
rest = full_vc_set.difference(x)
# if there are more elements than the sample we want, take a sample
if len(rest) > n_negative_sample:
rest = random.sample(full_vc_set.difference(x),n_negative_sample)
# otherwise, just use the full set
# probably unnecessary to check for nulls, but just in case...
if len(rest) == 0:
return None
else:
return rest
df['cat_negative_sample'] = df.keyword_cat.apply(get_sampled_categories)
else:
def get_remaining_categories(x):
# Produce all negative examples
rest = list(full_vc_set.difference(x))
if len(rest) == 0:
return None
else:
return rest
df['cat_negative_sample'] = df.keyword_cat.apply(get_remaining_categories)
print('Dropping negative samples...')
print(df.shape)
df = df.dropna(subset=['cat_negative_sample'])
print(df.shape)
df_pos = df.explode(column='keyword_cat')
df_pos['label'] = 1
df_pos['cat'] = df_pos['keyword_cat']
df_neg = df.explode(column='cat_negative_sample')
df_neg['label'] = 0
df_neg['cat'] = df_neg['cat_negative_sample']
df_ret = pd.concat([df_pos, df_neg])
df_ret = df_ret.drop(columns=['cat_negative_sample', 'keyword_cat']) #'cat_negative',
elif task == "PREDICT":
raise NotImplementedError()
return df_ret
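# Hedged example of building the MULTICLASS dataset. The column names ("desc_en",
# "subject_en") and pipeline choices are placeholders; substitute the metadata schema
# and NRCan pipelines used in your project.
def _example_build_multiclass_dataset(metadata_df):
    return produce_keyword_classification_dataset_from_df(
        metadata_df,
        pre_pipeline=None,
        post_pipeline=None,
        cat_pre_pipeline=None,
        cat_post_pipeline=None,
        text_column="desc_en",
        text_column_is_list=True,
        text_col_processing="join",
        keyword_col="subject_en",
        n_categories=20,
        task="MULTICLASS",
        n_negative_sample=None,
    )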
def load_glove_model(model_path):
print(f'Loading model from {model_path}...')
glove_file = datapath(model_path)
tmp_file_name = f"{pathlib.Path(model_path).parent}/tmp_word2vec.txt"
with FileLock(str(tmp_file_name) + ".lock"):
tmp_file = get_tmpfile(tmp_file_name)
_ = glove2word2vec(glove_file, tmp_file)
model = KeyedVectors.load_word2vec_format(tmp_file)
return model
class KerasValidMetrics(Callback):
def __init__(self, val_data, batch_size = 32):
super().__init__()
self.validation_data = val_data
#self.batch_size = batch_size
def on_train_begin(self, logs={}):
self.val_micro_precision = []
self.val_micro_recall = []
self.val_micro_fb1 = []
self.val_macro_precision = []
self.val_macro_recall = []
self.val_macro_fb1 = []
self.val_sample_precision = []
self.val_sample_recall = []
self.val_sample_fb1 = []
self.val_sample_support = []
self.val_accuracy = []
def on_epoch_end(self, epoch, logs={}):
val_predict = (np.asarray(self.model.predict(self.validation_data[0]))).round()
val_targ = self.validation_data[1]
metrics = compute_metrics_multiclass(val_targ, val_predict)
self.val_micro_precision.append(metrics['micro-precision'])
self.val_macro_precision.append(metrics['macro-precision'])
self.val_micro_recall.append(metrics['micro-recall'])
self.val_macro_recall.append(metrics['macro-recall'])
self.val_micro_fb1.append(metrics['micro-fb1'])
self.val_macro_fb1.append(metrics['macro-fb1'])
self.val_accuracy.append(metrics['accuracy'])
self.val_sample_precision.append(metrics['sample-precision'])
self.val_sample_recall.append(metrics['sample-recall'])
self.val_sample_fb1.append(metrics['sample-fb1'])
self.val_sample_support.append(metrics['support'])
print(f" - val_micro-precision: {metrics['micro-precision']} - val_micro-recall: {metrics['micro-recall']} - val_micro_fb1: {metrics['micro-fb1']}")
print(f" - val_macro-precision: {metrics['macro-precision']} - val_macro-recall: {metrics['macro-recall']} - val_macro_fb1: {metrics['macro-fb1']}")
print(f" - val_accuracy: {metrics['accuracy']}")
print(f" - val_sample_precision: {metrics['sample-precision']}")
print(f" - val_sample_recall: {metrics['sample-recall']}")
print(f" - val_sample_fb1: {metrics['sample-fb1']}")
print(f" - val_sample_support: {metrics['support']}")
return
def run_keyword_prediction_keras(
data_dir,
output_dir,
n_splits,
n_rerun=5,
keyword_text_col='sentence1',
label_col='label',
keyword_cat_col='cat',
task='MULTICLASS',
use_class_weight=False,
embedding_model_path=None,
njobs=None,
existing_run_dir=None,
):
""" Train a model with keras """
saved_args = locals()
maxlen = 200
if existing_run_dir is not None:
print('Starting from an existing run...')
output_dir = pathlib.Path(existing_run_dir)
assert output_dir.exists()
else:
now = datetime.datetime.today().strftime('%Y-%m-%d-%H-%M-%S%f')
output_dir_parent = pathlib.Path(output_dir)
output_dir = pathlib.Path(output_dir) / f'run-glove-keras-{task}_{now}'
output_dir.mkdir(parents=False, exist_ok=False)
input_df_log = output_dir / "input_data.log"
if not input_df_log.exists():
with open(input_df_log, 'w') as f:
json.dump({k: v.__name__ if callable(v) else v
for k,v in saved_args.items()}, f, indent=4)
embedding_model = load_glove_model(embedding_model_path)
else:
with open(input_df_log) as f:
loaded_args = json.load(f)
data_dir = loaded_args['data_dir']
n_splits = loaded_args['n_splits']
n_rerun = loaded_args['n_rerun']
keyword_text_col = loaded_args['keyword_text_col']
label_col = loaded_args['label_col']
keyword_cat_col = loaded_args['keyword_cat_col']
task = loaded_args['task']
use_class_weight = loaded_args['use_class_weight']
assert type(use_class_weight) == bool
embedding_model_path = loaded_args['embedding_model_path']
njobs = loaded_args['njobs']
print('replacing...')
print(saved_args)
print('with..')
print(loaded_args)
embedding_model = load_glove_model(embedding_model_path)
data_dir = pathlib.Path(data_dir)
models_all = {}
cv_scores_all = {}
for i in range(0,n_splits):
suffix = '.csv'
print('--------------------------------------------------')
print(f"Training split {i}...")
train_file = data_dir / f"split_{i}" / ("train" + suffix)
print(f"Train file: {train_file}")
train_df = pd.read_csv(train_file)
valid_file = data_dir / f"split_{i}" / ("valid" + suffix)
print(f"Valid file: {valid_file}")
valid_df = pd.read_csv(valid_file)
valid_df = valid_df.fillna("")
# we let the tokenizer build on both the train/val because we might
# have representations for tokens in val in our embedding already
# but these might not be in the training set
print('Building tokenizer...')
tokenizer = Tokenizer(
num_words=None,
filters="",
lower=True,
split=" ",
char_level=False,
oov_token=None,
document_count=0,
)
tokenizer.fit_on_texts(pd.concat([train_df,valid_df])[keyword_text_col].values)
train_sequences_sent1 = tokenizer.texts_to_sequences(train_df[keyword_text_col].values)
valid_sequences_sent1 = tokenizer.texts_to_sequences(valid_df[keyword_text_col].values)
word_index = tokenizer.word_index
print(f'Found {len(word_index)} unique tokens in {keyword_text_col}.')
if task == 'MULTICLASS':
X_train = keras.preprocessing.sequence.pad_sequences(train_sequences_sent1, maxlen=maxlen)
X_test = keras.preprocessing.sequence.pad_sequences(valid_sequences_sent1, maxlen=maxlen)
print(X_train.shape)
print(X_test.shape)
cols = train_df.filter(regex=keyword_cat_col).columns
Y_train = train_df.loc[:,cols].values
Y_test = valid_df.loc[:,cols].values
class_weights = Y_train.shape[0]/Y_train.sum(axis=0)
class_weights_per_datum = np.dot(Y_train, class_weights)
elif task == 'PAIRING':
raise NotImplementedError()
else:
raise ValueError(f'Unknown task {task}')
models={}
cv_scores={}
for j in range(n_rerun):
print(f"Training rerun {j}...")
sub_output_dir = output_dir / f"split_{i}_run_{j}"
print(f'...{sub_output_dir}')
if existing_run_dir is not None:
model_save_name = pathlib.Path(sub_output_dir) / "model.keras"
model_cv_results_file = pathlib.Path(sub_output_dir) / "model_history.json"
scores_file = pathlib.Path(sub_output_dir) / "metrics.json"
if model_save_name.exists() and model_cv_results_file.exists() and scores_file.exists():
print(f'...already trained for split {i} run {j}. Skipping...')
continue
else:
sub_output_dir.mkdir(parents=False, exist_ok=False)
keras_model = build_keras_model(
embedding_model=embedding_model,
task=task,
output_classes_shape=Y_train.shape[1]
)
def weighted_binary_crossentropy(y_true, y_pred, class_weights):
return K.mean(K.binary_crossentropy(tf.cast(y_true, tf.float32), y_pred) * class_weights, axis=-1)
if use_class_weight:
print(f'Training using class weighted loss..')
loss = lambda y_true, y_pred: weighted_binary_crossentropy(y_true, y_pred, class_weights)
else:
print(f'Training using normal un-class weighted loss..')
loss = 'binary_crossentropy'
keras_model.compile("adam", loss=loss)
model, cv_score = run_keras_model_cv(
X_train=X_train, Y_train=Y_train, X_test=X_test, Y_test=Y_test,
output_dir=sub_output_dir,
model=keras_model,
random_state=j,
njobs=njobs,
)
models[j] = model
cv_scores[j] = cv_score
cv_scores_all[i] = cv_scores
models_all[i] = models
return models_all, cv_scores_all
def run_keyword_prediction_classic(
data_dir,
output_dir,
clf_initializer,
n_splits,
n_rerun=5,
keyword_text_col='sentence1',
label_col='label',
keyword_cat_col='cat',
task='MULTICLASS',
use_class_weight=False,
embedding_model_path=None,
njobs=None,
use_multioutput_wrapper=False,
vectorization_method='sum',
):
""" Train a model using sklearn """
saved_args = locals()
now = datetime.datetime.today().strftime('%Y-%m-%d-%H-%M-%S%f')
output_dir_parent = pathlib.Path(output_dir)
output_dir = pathlib.Path(output_dir) / f'run-glove-{task}_{now}'
output_dir.mkdir(parents=False, exist_ok=False)
embedding_model = load_glove_model(embedding_model_path)
input_df_log = output_dir / "input_data.log"
with open(input_df_log, 'w') as f:
json.dump({k: v.__name__ if callable(v) else v
for k,v in saved_args.items()}, f, indent=4)
data_dir = pathlib.Path(data_dir)
models_all = {}
cv_scores_all = {}
for i in range(0,n_splits):
suffix = '.csv'
print('--------------------------------------------------')
print(f"Training split {i}...")
train_file = data_dir / f"split_{i}" / ("train" + suffix)
print(f"Train file: {train_file}")
train_df = pd.read_csv(train_file)
valid_file = data_dir / f"split_{i}" / ("valid" + suffix)
print(f"Valid file: {valid_file}")
valid_df = pd.read_csv(valid_file)
if task == 'MULTICLASS':
X_train = convert_dfcol_text_to_vector(train_df, keyword_text_col, embedding_model, method=vectorization_method)
# fillna if necessary
valid_df[keyword_text_col] = valid_df[keyword_text_col].fillna('')
X_test = convert_dfcol_text_to_vector(valid_df, keyword_text_col, embedding_model, method=vectorization_method)
cols = train_df.filter(regex=keyword_cat_col).columns
Y_train = train_df.loc[:,cols].values
Y_test = valid_df.loc[:,cols].values
class_weights = Y_train.shape[0]/Y_train.sum(axis=0)
class_weights_per_datum = np.dot(Y_train, class_weights)
elif task == 'PAIRING':
X1 = convert_dfcol_text_to_vector(train_df, keyword_text_col, embedding_model, method=vectorization_method)
X2 = convert_dfcol_text_to_vector(train_df, keyword_cat_col, embedding_model, method=vectorization_method)
X_train= np.concatenate([X1, X2], axis=1)
Y_train = train_df[label_col].values
X1 = convert_dfcol_text_to_vector(valid_df, keyword_text_col, embedding_model, method=vectorization_method)
X2 = convert_dfcol_text_to_vector(valid_df, cat_text_col, embedding_model, method=vectorization_method)
X_test= np.concatenate([X1, X2], axis=1)
Y_test = valid_df[label_col].values
else:
raise ValueError(f'Unknown task {task}')
models={}
cv_scores={}
for j in range(n_rerun):
print(f"Training rerun {j}...")
sub_output_dir = output_dir / f"split_{i}_run_{j}"
print(f'...{sub_output_dir}')
sub_output_dir.mkdir(parents=False, exist_ok=False)
model, cv_score = run_sklearn_model_cv(
X_train=X_train, Y_train=Y_train, X_test=X_test, Y_test=Y_test,
output_dir=sub_output_dir,
use_class_weight=use_class_weight,
class_weights_per_datum=class_weights_per_datum,
clf_initializer=clf_initializer,
random_state=j,
njobs=njobs,
use_multioutput_wrapper=use_multioutput_wrapper
)
models[j] = model
cv_scores[j] = cv_score
cv_scores_all[i] = cv_scores
models_all[i] = models
return models_all, cv_scores_all
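# A minimal sketch of the inverse-frequency class weighting used above
# (class_weights = n_samples / positives_per_class); the toy label matrix is
# illustrative and not part of the pipeline.
def _example_class_weights():
    import numpy as np
    Y = np.array([[1, 0], [1, 0], [1, 0], [0, 1]])
    class_weights = Y.shape[0] / Y.sum(axis=0)   # -> array([1.33..., 4.0])
    class_weights_per_datum = np.dot(Y, class_weights)
    return class_weights, class_weights_per_datum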
def compute_metrics_multiclass(y_true, y_pred):
""" Compute multiclass metrics """
micro_precision, micro_recall, micro_fb1, support = precision_recall_fscore_support(y_true, y_pred, average='micro')
macro_precision, macro_recall, macro_fb1, support = precision_recall_fscore_support(y_true, y_pred, average='macro')
weighted_precision, weighted_recall, weighted_fb1, support = precision_recall_fscore_support(y_true, y_pred, average='weighted')
    sample_precision, sample_recall, sample_fb1, support = precision_recall_fscore_support(y_true, y_pred, average=None)  # per-class (per-label) scores
confusion_matrix = multilabel_confusion_matrix(y_true, y_pred)
return {'accuracy': accuracy_score(y_true, y_pred),
'micro-precision': micro_precision,
'micro-recall': micro_recall,
'micro-fb1': micro_fb1,
'macro-precision': macro_precision,
'macro-recall': macro_recall,
'macro-fb1': macro_fb1,
'support': support.tolist(),
'sample-precision': sample_precision.tolist(),
'sample-recall': sample_recall.tolist(),
'sample-fb1': sample_fb1.tolist(),
'confusion_matrix': confusion_matrix.tolist()
}
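# A self-contained illustration of the micro vs. macro averaging reported by
# compute_metrics_multiclass; the toy labels are made up. Micro pools every label
# decision, while macro averages per-label scores, so rare labels weigh as much
# as frequent ones.
def _example_micro_vs_macro():
    import numpy as np
    from sklearn.metrics import precision_recall_fscore_support
    y_true = np.array([[1, 0], [1, 0], [0, 1]])
    y_pred = np.array([[1, 0], [0, 0], [0, 1]])
    micro = precision_recall_fscore_support(y_true, y_pred, average='micro')
    macro = precision_recall_fscore_support(y_true, y_pred, average='macro')
    return micro, macro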
class NumpyEncoder(json.JSONEncoder):
""" For saving dict with numpy arrays to json """
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
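# Example use of NumpyEncoder (assumes only that numpy arrays appear as values);
# without the custom encoder, json.dumps raises a TypeError on the ndarray.
def _example_numpy_encoder():
    import json
    import numpy as np
    payload = {'accuracy': 0.9, 'confusion_matrix': np.eye(2, dtype=int)}
    return json.dumps(payload, cls=NumpyEncoder)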
def build_keras_model(
embedding_model,
task,
output_classes_shape,
):
""" Build the keras model """
if task == 'MULTICLASS':
inputs = keras.Input(shape=(None,), dtype="int32")
embedding_layer = embedding_model.get_keras_embedding()
x = embedding_layer(inputs)
x = layers.Bidirectional(layers.LSTM(64))(x)
outputs = layers.Dense(output_classes_shape, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.summary()
    elif task == 'PAIRING':
        inputs = keras.Input(shape=(None,), dtype="int32")
        inputs2 = keras.Input(shape=(None,), dtype="int32")
        embedding_layer = embedding_model.get_keras_embedding()
        x = embedding_layer(inputs)
        x = layers.Bidirectional(layers.LSTM(64))(x)
        y = embedding_layer(inputs2)
        y = layers.Bidirectional(layers.LSTM(64))(y)
        x = layers.Concatenate(axis=-1)([x, y])
        outputs = layers.Dense(output_classes_shape, activation="sigmoid")(x)
        model = keras.Model([inputs, inputs2], outputs)
        model.summary()
return model
def run_keras_model_cv(
X_train, Y_train, X_test, Y_test,
output_dir,
model,
random_state:float,
njobs=None,
):
""" Run cross validation for a single keras model """
print(X_train.shape, Y_train.shape, X_test.shape, Y_test.shape)
X_train_train, X_train_valid, Y_train_train, Y_train_valid = train_test_split(X_train,
Y_train, test_size=0.1, random_state=random_state)
keras_valid_metrics = KerasValidMetrics(val_data=(X_train_valid, Y_train_valid))
history = model.fit(X_train_train, Y_train_train, batch_size=32, epochs=200,
validation_data=(X_train_valid, Y_train_valid),
callbacks=[keras_valid_metrics])
# save the model
model_save_name = pathlib.Path(output_dir) / "model.keras"
print(f"Saving model to {model_save_name}")
model.save(model_save_name)
model_cv_results_file = pathlib.Path(output_dir) / "model_history.json"
with open(model_cv_results_file, 'w') as f:
json.dump(history.history, f, indent=4, cls=NumpyEncoder)
preds_prob = model.predict(X_test)
preds = (preds_prob > 0.5).astype(int)
preds_prob_x = model.predict(X_train)
preds_x = (preds_prob_x > 0.5).astype(int)
print(preds_x.shape, preds.shape)
score_values = {}
test_metrics = compute_metrics_multiclass(Y_test, preds)
for metric_name,metric_value in test_metrics.items():
metric_name = f'eval_{metric_name}'
score_values[metric_name] = metric_value
print(f'{metric_name}: {score_values[metric_name]}')
train_metrics = compute_metrics_multiclass(Y_train,preds_x)
for metric_name,metric_value in train_metrics.items():
metric_name = f'train_{metric_name}'
score_values[metric_name] = metric_value
print(f'{metric_name}: {score_values[metric_name]}')
scores_file = pathlib.Path(output_dir) / "metrics.json"
print(f"Writing metrics to {scores_file}...")
with open(scores_file, 'w') as f:
json.dump(score_values, f, indent=4)
return model, score_values
def run_sklearn_model_cv(
X_train, Y_train, X_test, Y_test,
output_dir,
clf_initializer,
use_class_weight:bool,
class_weights_per_datum:np.ndarray, #only used if use_class_weight is true and is necessary
random_state:float,
njobs=None,
use_multioutput_wrapper=False,
):
""" Run cross validation for a single sklearn model """
print(X_train.shape, Y_train.shape, X_test.shape, Y_test.shape)
clf, params, gs_params = clf_initializer(class_weight="balanced" if use_class_weight else None,
njobs=njobs,
random_state=random_state)
using_mlp = type(clf) in [sklearn.neural_network.MLPClassifier]
if use_multioutput_wrapper:
print('Wrapping the classifier with a multioutput classifier...')
gs_params = {f'estimator__{key}':value for key,value in gs_params.items()}
clf = MultiOutputClassifier(clf, n_jobs=njobs)
gs = GridSearchCV(clf, gs_params, refit='f1_macro', cv=5,
return_train_score=True,
scoring=['accuracy', 'f1_macro', 'f1_micro', 'f1_samples',
'recall_macro', 'recall_micro', 'recall_samples',
'precision_macro', 'precision_micro', 'precision_samples'])
if use_class_weight and using_mlp:
print('Training with sample weights....')
ros = RandomOverSampler(random_state=random_state)
if use_multioutput_wrapper:
print('..Training with multioutput oversampling...')
gs.fit(X_train,Y_train, imbalanced_sampler=ros)
else:
            print("..NNs don't support sampling")
gs.fit(X_train, Y_train)
else:
print('Training without sample_weights...')
gs.fit(X_train,Y_train)
# save the model
model_save_name = pathlib.Path(output_dir) / "model.joblib"
print(f"Saving model to {model_save_name}")
joblib.dump(clf, model_save_name)
gs_save_name = pathlib.Path(output_dir) / "gs.joblib"
print(f"Saving gs to {gs_save_name}")
joblib.dump(gs, gs_save_name)
model_cv_results_file = pathlib.Path(output_dir) / "model_cv_results.json"
with open(model_cv_results_file, 'w') as f:
json.dump(gs.cv_results_, f, indent=4, cls=NumpyEncoder)
model_params = pathlib.Path(output_dir) / "model_params.json"
with open(model_params, 'w') as f:
json.dump(gs.best_params_, f, indent=4, cls=NumpyEncoder)
preds = gs.predict(X_test)
preds_x = gs.predict(X_train)
print(preds_x.shape, preds.shape)
score_values = {}
test_metrics = compute_metrics_multiclass(Y_test, preds)
for metric_name,metric_value in test_metrics.items():
metric_name = f'eval_{metric_name}'
score_values[metric_name] = metric_value
print(f'{metric_name}: {score_values[metric_name]}')
train_metrics = compute_metrics_multiclass(Y_train,preds_x)
for metric_name,metric_value in train_metrics.items():
metric_name = f'train_{metric_name}'
score_values[metric_name] = metric_value
print(f'{metric_name}: {score_values[metric_name]}')
scores_file = pathlib.Path(output_dir) / "metrics.json"
print(f"Writing metrics to {scores_file}...")
with open(scores_file, 'w') as f:
json.dump(score_values, f, indent=4)
return clf, score_values
def produce_dataset_splits(
output_dir:str, # the output dir for the data
input_df_file:str, # the input file
output_name:str, # the name of the output subfolder
task:str, # PAIRING or MULTICLASS,
keyword_text_col='keyword_text',
label_col='label',
keyword_cat_col='cat',
n_splits=5,
test_size=0.8,
dropna=False,
model_type="BERT"
):
output_dir = pathlib.Path(output_dir) / output_name
if output_dir.exists():
raise ValueError(f'ERROR: output dir {output_dir} already exists')
if not output_dir.exists():
output_dir.mkdir(parents=False, exist_ok=False)
input_df_log = output_dir / "input_data.log"
with open(input_df_log, 'w') as f:
f.write(str(input_df_file))
    df = pd.read_parquet(input_df_file)
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserWarning
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import Categorical, DataFrame, Index, MultiIndex, Series, Timestamp, concat
import pandas._testing as tm
@pytest.mark.parametrize("dtype", [str, object])
@pytest.mark.parametrize("check_orig", [True, False])
def test_dtype_all_columns(all_parsers, dtype, check_orig):
# see gh-3795, gh-6607
parser = all_parsers
df = DataFrame(
np.random.rand(5, 2).round(4),
columns=list("AB"),
index=["1A", "1B", "1C", "1D", "1E"],
)
with tm.ensure_clean("__passing_str_as_dtype__.csv") as path:
df.to_csv(path)
result = parser.read_csv(path, dtype=dtype, index_col=0)
if check_orig:
expected = df.copy()
result = result.astype(float)
else:
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
def test_dtype_all_columns_empty(all_parsers):
# see gh-12048
parser = all_parsers
result = parser.read_csv(StringIO("A,B"), dtype=str)
expected = DataFrame({"A": [], "B": []}, index=[], dtype=str)
tm.assert_frame_equal(result, expected)
def test_dtype_per_column(all_parsers):
parser = all_parsers
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
expected = DataFrame(
[[1, "2.5"], [2, "3.5"], [3, "4.5"], [4, "5.5"]], columns=["one", "two"]
)
expected["one"] = expected["one"].astype(np.float64)
expected["two"] = expected["two"].astype(object)
result = parser.read_csv(StringIO(data), dtype={"one": np.float64, 1: str})
tm.assert_frame_equal(result, expected)
def test_invalid_dtype_per_column(all_parsers):
parser = all_parsers
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
with pytest.raises(TypeError, match="data type [\"']foo[\"'] not understood"):
parser.read_csv(StringIO(data), dtype={"one": "foo", 1: "int"})
@pytest.mark.parametrize(
"dtype",
[
"category",
CategoricalDtype(),
{"a": "category", "b": "category", "c": CategoricalDtype()},
],
)
def test_categorical_dtype(all_parsers, dtype):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = DataFrame(
{
"a": Categorical(["1", "1", "2"]),
"b": Categorical(["a", "a", "b"]),
"c": Categorical(["3.4", "3.4", "4.5"]),
}
)
actual = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize("dtype", [{"b": "category"}, {1: "category"}])
def test_categorical_dtype_single(all_parsers, dtype):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = DataFrame(
{"a": [1, 1, 2], "b": Categorical(["a", "a", "b"]), "c": [3.4, 3.4, 4.5]}
)
actual = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_unsorted(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,b,3.4
1,b,3.4
2,a,4.5"""
expected = DataFrame(
{
"a": Categorical(["1", "1", "2"]),
"b": Categorical(["b", "b", "a"]),
"c": Categorical(["3.4", "3.4", "4.5"]),
}
)
actual = parser.read_csv(StringIO(data), dtype="category")
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_missing(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,b,3.4
1,nan,3.4
2,a,4.5"""
expected = DataFrame(
{
"a": Categorical(["1", "1", "2"]),
"b": Categorical(["b", np.nan, "a"]),
"c": Categorical(["3.4", "3.4", "4.5"]),
}
)
actual = parser.read_csv(StringIO(data), dtype="category")
tm.assert_frame_equal(actual, expected)
@pytest.mark.slow
def test_categorical_dtype_high_cardinality_numeric(all_parsers):
# see gh-18186
parser = all_parsers
data = np.sort([str(i) for i in range(524289)])
expected = DataFrame({"a": Categorical(data, ordered=True)})
actual = parser.read_csv(StringIO("a\n" + "\n".join(data)), dtype="category")
actual["a"] = actual["a"].cat.reorder_categories(
np.sort(actual.a.cat.categories), ordered=True
)
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_latin1(all_parsers, csv_dir_path):
# see gh-10153
pth = os.path.join(csv_dir_path, "unicode_series.csv")
parser = all_parsers
encoding = "latin-1"
expected = parser.read_csv(pth, header=None, encoding=encoding)
expected[1] = Categorical(expected[1])
actual = parser.read_csv(pth, header=None, encoding=encoding, dtype={1: "category"})
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_utf16(all_parsers, csv_dir_path):
# see gh-10153
pth = os.path.join(csv_dir_path, "utf16_ex.txt")
parser = all_parsers
encoding = "utf-16"
sep = "\t"
expected = parser.read_csv(pth, sep=sep, encoding=encoding)
expected = expected.apply(Categorical)
actual = parser.read_csv(pth, sep=sep, encoding=encoding, dtype="category")
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize_infer_categories(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
expecteds = [
DataFrame({"a": [1, 1], "b": Categorical(["a", "b"])}),
DataFrame({"a": [1, 2], "b": Categorical(["b", "c"])}, index=[2, 3]),
]
actuals = parser.read_csv(StringIO(data), dtype={"b": "category"}, chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize_explicit_categories(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
cats = ["a", "b", "c"]
expecteds = [
DataFrame({"a": [1, 1], "b": Categorical(["a", "b"], categories=cats)}),
DataFrame(
{"a": [1, 2], "b": Categorical(["b", "c"], categories=cats)}, index=[2, 3]
),
]
dtype = CategoricalDtype(cats)
actuals = parser.read_csv(StringIO(data), dtype={"b": dtype}, chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize(
"categories",
[["a", "b", "c"], ["a", "c", "b"], ["a", "b", "c", "d"], ["c", "b", "a"]],
)
def test_categorical_category_dtype(all_parsers, categories, ordered):
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
expected = DataFrame(
{
"a": [1, 1, 1, 2],
"b": Categorical(
["a", "b", "b", "c"], categories=categories, ordered=ordered
),
}
)
dtype = {"b": CategoricalDtype(categories=categories, ordered=ordered)}
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_category_dtype_unsorted(all_parsers):
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
dtype = CategoricalDtype(["c", "b", "a"])
expected = DataFrame(
{
"a": [1, 1, 1, 2],
"b": Categorical(["a", "b", "b", "c"], categories=["c", "b", "a"]),
}
)
result = parser.read_csv(StringIO(data), dtype={"b": dtype})
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_numeric(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype([1, 2, 3])}
data = "b\n1\n1\n2\n3"
expected = DataFrame({"b": Categorical([1, 1, 2, 3])})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_datetime(all_parsers):
parser = all_parsers
dti = pd.DatetimeIndex(["2017-01-01", "2018-01-01", "2019-01-01"], freq=None)
dtype = {"b": CategoricalDtype(dti)}
data = "b\n2017-01-01\n2018-01-01\n2019-01-01"
expected = DataFrame({"b": Categorical(dtype["b"].categories)})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_timestamp(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype([Timestamp("2014")])}
data = "b\n2014-01-01\n2014-01-01T00:00:00"
expected = DataFrame({"b": Categorical([Timestamp("2014")] * 2)})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_timedelta(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype(pd.to_timedelta(["1H", "2H", "3H"]))}
data = "b\n1H\n2H\n3H"
expected = DataFrame({"b": Categorical(dtype["b"].categories)})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
"b\nTrue\nFalse\nNA\nFalse",
"b\ntrue\nfalse\nNA\nfalse",
"b\nTRUE\nFALSE\nNA\nFALSE",
"b\nTrue\nFalse\nNA\nFALSE",
],
)
def test_categorical_dtype_coerces_boolean(all_parsers, data):
# see gh-20498
parser = all_parsers
dtype = {"b": CategoricalDtype([False, True])}
expected = DataFrame({"b": Categorical([True, False, None, False])})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_unexpected_categories(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype(["a", "b", "d", "e"])}
data = "b\nd\na\nc\nd" # Unexpected c
expected = DataFrame({"b": Categorical(list("dacd"), dtype=dtype["b"])})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_empty_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two"
result = parser.read_csv(StringIO(data), dtype={"one": "u1"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "two": np.empty(0, dtype=object)},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_index_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two"
result = parser.read_csv(
StringIO(data), index_col=["one"], dtype={"one": "u1", 1: "f"}
)
expected = DataFrame(
{"two": np.empty(0, dtype="f")}, index=Index([], dtype="u1", name="one")
)
tm.assert_frame_equal(result, expected)
def test_empty_with_multi_index_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two,three"
result = parser.read_csv(
StringIO(data), index_col=["one", "two"], dtype={"one": "u1", 1: "f8"}
)
exp_idx = MultiIndex.from_arrays(
[np.empty(0, dtype="u1"), np.empty(0, dtype=np.float64)], names=["one", "two"]
)
expected = DataFrame({"three": np.empty(0, dtype=object)}, index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_empty_with_mangled_column_pass_dtype_by_names(all_parsers):
parser = all_parsers
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={"one": "u1", "one.1": "f"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_mangled_column_pass_dtype_by_indexes(all_parsers):
parser = all_parsers
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_dup_column_pass_dtype_by_indexes(all_parsers):
# see gh-9424
parser = all_parsers
expected = concat(
[Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],
axis=1,
)
expected.index = expected.index.astype(object)
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"})
tm.assert_frame_equal(result, expected)
def test_empty_with_dup_column_pass_dtype_by_indexes_raises(all_parsers):
# see gh-9424
parser = all_parsers
expected = concat(
[Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],
axis=1,
)
expected.index = expected.index.astype(object)
with pytest.raises(ValueError, match="Duplicate names"):
data = ""
parser.read_csv(StringIO(data), names=["one", "one"], dtype={0: "u1", 1: "f"})
def test_raise_on_passed_int_dtype_with_nas(all_parsers):
# see gh-2631
parser = all_parsers
data = """YEAR, DOY, a
2001,106380451,10
2001,,11
2001,106380451,67"""
msg = (
"Integer column has NA values"
if parser.engine == "c"
else "Unable to convert column DOY"
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), dtype={"DOY": np.int64}, skipinitialspace=True)
def test_dtype_with_converters(all_parsers):
parser = all_parsers
data = """a,b
1.1,2.2
1.2,2.3"""
# Dtype spec ignored if converted specified.
with tm.assert_produces_warning(ParserWarning):
result = parser.read_csv(
StringIO(data), dtype={"a": "i8"}, converters={"a": lambda x: str(x)}
)
    expected = DataFrame({"a": ["1.1", "1.2"], "b": [2.2, 2.3]})
    tm.assert_frame_equal(result, expected)
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from joblib import parallel_backend
from multiprocessing import cpu_count
import os, gc, joblib
from tqdm import tqdm
from collections import defaultdict
import torch
import warnings
warnings.filterwarnings('ignore')
pd.set_option("display.max_colwidth", 100)
pd.set_option("display.max_rows", 20)
osj = os.path.join; osl = os.listdir
n_cpus = cpu_count()
class ViralDataset(torch.utils.data.Dataset):
def __init__(self, df: pd.DataFrame, feat_cols: list, mode: str):
self.X = df[feat_cols].values # [:,np.newaxis,:]
self.mode = mode
if mode != 'test':
self.targets = df['virality'].values # [:,np.newaxis] # - 1
# assert np.sum(~df['virality'].isin(list(range(5))))==0
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
if self.mode=='test':
return torch.tensor(self.X[idx], dtype=torch.float32)
else:
return (torch.tensor(self.X[idx], dtype=torch.float32),
torch.tensor(self.targets[idx], dtype=torch.long)) # long))
class ExtractFeatsDataset(torch.utils.data.Dataset):
def __init__(self, df: pd.DataFrame, feat_cols: list, target_cols: list, mode: str):
self.X = df[feat_cols].values # [:,np.newaxis,:]
# self.target_cols = target_cols
self.mode = mode
if mode != 'test':
if len(target_cols)==1:
self.targets = df[target_cols[0]].values # [:,np.newaxis] # - 1
self.target_dtype = torch.long
else:
self.targets = df[target_cols].values # [:,np.newaxis] # - 1
self.target_dtype = torch.float32
# assert np.sum(~df['virality'].isin(list(range(5))))==0
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
if self.mode=='test':
return torch.tensor(self.X[idx], dtype=torch.float32)
else:
return (torch.tensor(self.X[idx], dtype=torch.float32),
torch.tensor(self.targets[idx], dtype=self.target_dtype)) # long))
def to_binary_categories(df, cat_col='tweet_language_id'):
df.loc[:, cat_col] = (df[cat_col]!=0).astype(np.int8)
return df
def freq_encoding(df, freq_cols: list, main_col='tweet_id'):
for c in freq_cols:
count_df = df.groupby([c])[main_col].count().reset_index()
count_df.columns = [c, '{}_freq'.format(c)]
df = df.merge(count_df, how='left', on=c)
return df
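# A minimal sketch of the frequency encoding above: each category value gains a
# companion column holding how often it occurs. The toy frame and column names
# are illustrative only.
def _example_freq_encoding():
    demo = pd.DataFrame({'tweet_id': [1, 2, 3, 4], 'tweet_language_id': [0, 0, 1, 2]})
    out = freq_encoding(demo, freq_cols=['tweet_language_id'])
    # out['tweet_language_id_freq'] -> [2, 2, 1, 1]
    return out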
def bin_feats(df, feats=[], n_bins_default=20):
bin_counts = defaultdict(lambda: n_bins_default)
bin_counts['user_tweet_count'] = 20
for feature in feats:
if '_binned' in feature:
continue
n_bins = bin_counts[feature]
if n_bins:
bins = np.unique(df[feature].quantile(np.linspace(0, 1, n_bins)).values)
df[feature + '_binned'] = pd.cut(
df[feature], bins=bins, duplicates='drop'
).cat.codes
return df
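# Sketch of the quantile binning used by bin_feats: np.unique keeps the edges
# strictly increasing when quantiles collide, and .cat.codes gives integer bin ids.
# Note that values equal to the lowest edge fall outside the first right-closed
# interval and receive code -1; bin_feats inherits the same behaviour. The toy
# series below is illustrative only.
def _example_quantile_binning():
    s = pd.Series([1, 1, 1, 2, 5, 9, 20, 50])
    edges = np.unique(s.quantile(np.linspace(0, 1, 5)).values)
    codes = pd.cut(s, bins=edges, duplicates='drop').cat.codes
    return edges, codes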
def to_categorical(df):
cat_cols = ['tweet_has_attachment', 'user_has_location', 'user_has_url', 'user_verified', ]
df[cat_cols] = df[cat_cols].astype('category')
return df
def change2float32(df):
float_cols = df.select_dtypes('float64').columns
df[float_cols] = df[float_cols].astype(np.float32)
return df
def merge_df2media(df, df_media):
num_media = (df_media.groupby('tweet_id')['media_id']
.nunique()
.reset_index())
df_media.drop('media_id', axis=1, inplace=True)
num_media.columns = ['tweet_id', 'num_media']
df_media = df_media.merge(num_media, how='left', on='tweet_id')
media_cols = [col for col in df_media if col not in ['tweet_id','media_id']]
df_media = df_media.groupby('tweet_id')[media_cols].mean().reset_index()
# df_media = mean_feats.merge(df_media[['tweet_id']], how='left', on='tweet_id')
# del mean_feats; _ = gc.collect()
df_media['tweet_has_media'] = True
df = df.merge(df_media, how='left', on='tweet_id')
# fillna False if tweet has no media
df['tweet_has_media'] = df['tweet_has_media'].fillna(False)
# the same for the count of number of media per tweet
df['num_media'] = df['num_media'].fillna(0).astype(np.int8)
return df
# def add_num_media_user(df):
# # todo when not debug: df['num_media'].equals(df['num_media_user'])
# num_media_user = df.groupby('tweet_id')['num_media'].sum().reset_index()
# num_media_user.columns = ['tweet_id','num_media_user']
# df = df.merge(num_media_user, how='left', on='tweet_id')
# df['num_media_user'] = df['num_media_user'].astype(np.int8)
# return df
def tweets_user_created_date(df):
for feat_ in ['tweet_created_at_year', 'tweet_created_at_month', 'tweet_created_at_day',
'tweet_created_at_hour']:
# counts_df_cols = ['tweet_user_id']+[f"tweets_in_{feat_.split('_')[-1]}_{time_}" for time_ in np.sort(df[feat_].unique())]
# tweet_user_ids = np.sort(df['tweet_user_id'].unique())
# counts_df = pd.DataFrame(index=range(tweet_user_ids), columns=counts_df_cols)
# counts_df['tweet_user_id'] = tweet_user_ids
counts_map = df.groupby('tweet_user_id')[feat_].apply(lambda x: x.value_counts())
counts_map = counts_map.unstack(level=1)
counts_map.columns = [f"tweets_in_{feat_.split('_')[-1]}_"+str(col) for col in counts_map.columns]
counts_map = counts_map.fillna(0).reset_index()
df = df.merge(counts_map, how='left', on='tweet_user_id')
return df
# n_tweets_time_user = df.groupby('tweet_user_id')[feat_].count().reset_index()
# n_tweets_time_user.columns = ['tweet_user_id', f"n_tweets_{feat_.split('_')[-1]}_user_count"]
# df = df.merge(n_tweets_time_user, how='left', on='tweet_user_id')
def create_date_col(df):
tweet_date_cols = ['tweet_created_at_year', 'tweet_created_at_month', 'tweet_created_at_day']
df['date'] = df[tweet_date_cols].apply(lambda x:
str(x['tweet_created_at_month']).strip() + '/' +
str(x['tweet_created_at_day']).strip() + '/' +
str(x['tweet_created_at_year']).strip(), axis=1)
df['date'] = pd.to_datetime(df['date'])
return df
def add_sincos(df):
hour_sine = np.sin(2 * np.pi * df['tweet_created_at_hour'] / 24.0)
hour_sine.name = 'sin_hour'
hour_cosine = np.cos(2 * np.pi * df['tweet_created_at_hour'] / 24.0)
hour_cosine.name = 'cos_hour'
df = df.join([hour_sine, hour_cosine])
return df
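# Why the sine/cosine pair above: hour 23 and hour 0 are adjacent in time but far
# apart numerically; on the unit circle their encodings sit next to each other.
# The quick check below is illustrative only.
def _example_cyclical_hours():
    hours = pd.Series([0, 6, 12, 23])
    sin_h = np.sin(2 * np.pi * hours / 24.0)
    cos_h = np.cos(2 * np.pi * hours / 24.0)
    # hour 0 -> (0.00, 1.00); hour 23 -> (-0.26, 0.97): close on the circle
    return sin_h, cos_h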
def add_dummy_dates(df):
year = pd.get_dummies(df.tweet_created_at_year, prefix='ohe_year')
month = pd.get_dummies(df.tweet_created_at_month, prefix='ohe_month')
day = pd.get_dummies(df.tweet_created_at_day, prefix='ohe_day')
user_year = pd.get_dummies(df.user_created_at_year, prefix='ohe_user_year')
user_month = pd.get_dummies(df.user_created_at_month, prefix='ohe_user_month')
df = df.join([year, month, day, user_year, user_month])
return df
def add_date_feats(df):
# todo OHE date
# todo to sin, cos(date)
#df_old_index = df.index
df = create_date_col(df)
df = add_sincos(df)
df = add_dummy_dates(df)
cols_resample = ['tweet_hashtag_count', 'tweet_url_count', 'tweet_mention_count',
]
date_freqs = ['1Q'] # ,'1M']
# todo DON't use _func_min if does not affect CV (low feat importance)
stats = ['sum','mean','std','max'] # ['mean', 'max', 'min', 'median', 'std']
for freq_ in date_freqs:
for stat_ in stats:
df.set_index('date', inplace=True)
g = (df.groupby('tweet_user_id').resample(freq_, closed='left')
[cols_resample].agg(stat_)
.astype(np.float32)
) # .set_index('date'))
g = g.unstack('date').fillna(0)
g.columns = [col1 + f'_func_{stat_}_' + col2.strftime('%Y-%m-%d') for (col1, col2) in g.columns]
g.reset_index(inplace=True)
# g = g.rename(columns ={col: f"{col}_rsmpl_{freq_}_func_{stat_}"
# for col in g.columns if col not in ['tweet_user_id','date']})
#df = df.reset_index().merge(g, how='left', on='tweet_user_id')
df = df.reset_index().merge(g, how='left', on='tweet_user_id')
# df.reset_index(drop=False, inplace=True)
# todo count 'tweet_id' for each period for user
today = pd.to_datetime('7/1/2021')
df['days_since_tweet'] = (today - df['date']).dt.days # .astype(int)
df['user_followers_count_2days'] = df['user_followers_count'] / df['days_since_tweet']
df['user_following_count_2days'] = df['user_following_count'] / df['days_since_tweet']
df['user_listed_on_count_2days'] = df['user_listed_on_count'] / df['days_since_tweet']
df['user_tweet_count_2days'] = df['user_tweet_count'] / df['days_since_tweet']
df['tweet_hashtag_count_2days'] = df['tweet_hashtag_count'] / df['days_since_tweet']
df['tweet_mention_count_2days'] = df['tweet_mention_count'] / df['days_since_tweet']
df['tweet_url_count_2days'] = df['tweet_url_count'] / df['days_since_tweet']
# todo not a date related functions:
df['tweet_mention_count_div_followers'] = df['tweet_mention_count'].divide(df['user_followers_count']+1)
df['tweet_url_count_div_followers'] = df['tweet_url_count'].divide(df['user_followers_count']+1)
df['tweet_hashtag_count_div_followers'] = df['tweet_hashtag_count'].divide(df['user_followers_count']+1)
df['tweet_mention_count_div_followers'] = df['tweet_mention_count'].divide(df['user_followers_count']+1)
df['tweet_mention_count_div_n_tweets'] = df['tweet_mention_count'].divide(df['user_tweet_count']+1)
df['tweet_url_count_div_n_tweets'] = df['tweet_url_count'].divide(df['user_tweet_count']+1)
df['tweet_hashtag_count_div_n_tweets'] = df['tweet_hashtag_count'].divide(df['user_tweet_count']+1)
df['tweet_mention_count_div_n_tweets'] = df['tweet_mention_count'].divide(df['user_tweet_count']+1)
df['tweet_mention_count_div_likes'] = df['tweet_mention_count'].divide(df['user_like_count']+1)
df['tweet_url_count_div_likes'] = df['tweet_url_count'].divide(df['user_like_count']+1)
df['tweet_hashtag_count_div_likes'] = df['tweet_hashtag_count'].divide(df['user_like_count']+1)
df['tweet_mention_count_div_likes'] = df['tweet_mention_count'].divide(df['user_like_count']+1)
cols_drop = ['date', 'tweet_created_at_year', 'tweet_created_at_month',
'tweet_created_at_day',
'user_created_at_year', 'user_created_at_month']
df.drop(cols_drop, axis=1, inplace=True)
return df
def ohe_func(df, cat_col, ohe_tfm=LabelBinarizer(), prefix=None):
""" OHE one categorical column of df, and return df with columns 'label_{range(1,x}' added
"""
# ohe.iloc[:, df['tweet_language_id'].tolist()]
ohe_tfm.fit(df[cat_col])
ohe_transformed = ohe_tfm.transform(df[cat_col])
if prefix:
cat_cols = [f'{prefix}_{cat_col}_{i}' for i in range(ohe_transformed.shape[1])]
else:
cat_cols = [f'{cat_col}_{i}' for i in range(ohe_transformed.shape[1])]
ohe_df = pd.DataFrame(ohe_transformed, index=df.index, columns=cat_cols)
df = pd.concat([df, ohe_df], axis=1)
df.drop(cat_col, axis=1, inplace=True)
return df
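# A short sketch of what ohe_func yields for a three-valued column; note that with
# exactly two classes LabelBinarizer emits a single 0/1 column, so only one
# '<cat_col>_0' column would be appended. The toy frame below is illustrative.
def _example_ohe_func():
    demo = pd.DataFrame({'tweet_language_id': [0, 1, 2, 1]})
    out = ohe_func(demo, 'tweet_language_id')
    # columns: tweet_language_id_0, tweet_language_id_1, tweet_language_id_2
    return out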
def drop_unnecessary_cols(cfg, df):
cols_drop = [] # 'tweet_created_at_year', 'tweet_created_at_month',
# 'tweet_created_at_day']
# 'days_since_user', 'user_created_at_year', 'user_created_at_month',
# 'user_verified', 'user_has_url']
if cfg.drop_rare_ohe_language_ids and cfg.one_hot_encode:
lang_leave_ids = [0, 1, 3]
cols_drop += [f'tweet_language_id_{i}' for i in range(31)
if i not in lang_leave_ids
]
for col in cols_drop:
if col in df.columns:
df.drop(col, axis=1, inplace=True)
# print(f"Dropped col: {col}")
return df
class Features():
def __init__(self,):
self.transformers = {}
self.impute_img_feature_nulls = -1
self.media_img_feat_cols = []
self.text_feat_cols = []
self.user_des_feat_cols = []
self.user_img_feat_cols = []
# union of topic ids in train and test , 0 - nan value, min=36, max=172
# xor train, test = [ 38, 117, 123, 165]
# in test but not in train = [ 38, 117, 123]
self.unique_topic_ids = [ 0, 36, 37, 38, 39, 43, 44, 45, 52, 58, 59, 60, 61,
63, 68, 71, 72, 73, 78, 79, 80, 81, 82, 87, 88, 89,
91, 93, 98, 99, 100, 101, 104, 111, 112, 117, 118, 119, 120,
121, 122, 123, 125, 126, 127, 147, 148, 149, 150, 151, 152, 153,
155, 156, 163, 165, 169, 170, 171, 172]
self.cols2int8 = ['fold', 'user_created_at_month', 'tweet_created_at_day', 'tweet_created_at_hour',
'tweet_hashtag_count', 'tweet_url_count', 'tweet_mention_count', 'tweet_has_attachment',
'virality', 'tweet_has_media', 'user_has_url', 'user_verified', 'num_media',
'user_id', 'tweet_user_id']
# 'tweet_created_at_year', 'user_created_at_year',
self.cols2int8 += [f'tweet_language_id_{i}' for i in range(30)]
def get_data_stage1(self, cfg, base_dir, n_samples=int(1e10)):
df = pd.read_csv(osj(base_dir, 'Tweets',f'train_tweets.csv'), nrows=n_samples)
test = pd.read_csv(osj(base_dir, 'Tweets',f'test_tweets.csv'), nrows=n_samples)
# test_tweet_ids = test['tweet_id'].to_list()
# self.tabular_feats.append()
        df = pd.concat([df, test])
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 10 17:28:46 2020
@author: manuel.chavez
"""
import tkinter as tk
import pandas as pd
import networkx as nx
import numpy as np
from extraction import pdf_finder as pdf
from extraction.user_interface import manual_edition_ui as me
from support_modules import support as sup
class TaskEvaluator():
"""
This class evaluates the tasks durations and associates resources to it
"""
def __init__(self, process_graph, process_stats, resource_pool, settings):
"""constructor"""
self.tasks = self.get_task_list(process_graph)
self.model_data = self.get_model_data(process_graph)
self.process_stats = process_stats
self.resource_pool = resource_pool
self.pdef_method = settings['pdef_method']
self.pdef_values = dict()
self.load_pdef_values(settings)
self.one_timestamp = settings['read_options']['one_timestamp']
self.elements_data = self.evaluate_tasks()
def load_pdef_values(self, settings):
if self.pdef_method == 'apx':
self.pdef_values = settings['tasks']
elif self.pdef_method == 'apx_percentage':
# Iterator
for task in settings['percentage'].keys():
self.pdef_values[task] = (settings['percentage'][task] *
settings['enabling_times'][task])
def evaluate_tasks(self):
"""
Process the task data and association of resources
Returns
-------
elements_data : Dataframe
"""
elements_data = list()
# processing time discovery method
if self.pdef_method == 'automatic':
elements_data = self.mine_processing_time()
if self.pdef_method in ['manual', 'semi-automatic']:
elements_data = self.define_distributions_manually()
if self.pdef_method in ['apx', 'apx_percentage']:
elements_data = self.match_predefined_time()
# Resource association
elements_data = self.associate_resource(elements_data)
elements_data = elements_data.to_dict('records')
elements_data = self.add_start_end_info(elements_data)
return elements_data
def mine_processing_time(self):
"""
Performs the mining of activities durations from data
Returns
-------
elements_data : Dataframe
"""
elements_data = list()
for task in self.tasks:
s_key = 'duration' if self.one_timestamp else 'processing_time'
task_processing = (
self.process_stats[
self.process_stats.task == task][s_key].tolist())
dist = pdf.DistributionFinder(task_processing).distribution
elements_data.append({'id': sup.gen_id(),
'type': dist['dname'],
'name': task,
'mean': str(dist['dparams']['mean']),
'arg1': str(dist['dparams']['arg1']),
'arg2': str(dist['dparams']['arg2'])})
elements_data = pd.DataFrame(elements_data)
elements_data = elements_data.merge(
self.model_data[['name', 'elementid']], on='name', how='left')
return elements_data
def match_predefined_time(self):
"""
        Perform the matching between the information given by the hyper-opt
and the BPMN model and resources data
Returns
-------
elements_data : Dataframe
"""
elements_data = list()
# Predefined tasks records creation
default_record = {'type': 'EXPONENTIAL', 'mean': '0', 'arg2': '0'}
for task, value in self.pdef_values.items():
record = {
**{'id': sup.gen_id(), 'name': str(task), 'arg1': str(value)},
**default_record}
elements_data.append(record)
        # Check if there are tasks without a predefined time
pdef_tasks = list(self.pdef_values.keys())
not_included = [task for task in self.tasks if task not in pdef_tasks]
default_record = {'type': 'EXPONENTIAL', 'mean': '0',
'arg1': '60', 'arg2': '0'}
for task in not_included:
elements_data.append({**{'id': sup.gen_id(), 'name': task},
**default_record})
elements_data = pd.DataFrame(elements_data)
# Matching with model info
elements_data = elements_data.merge(self.model_data[['name', 'elementid']],
on='name',
how='left').sort_values(by='name')
return elements_data
def define_distributions_manually(self):
"""
Enable the manual edition of tasks duration
Returns
-------
elements_data : Dataframe
"""
if self.pdef_method == 'semi-automatic':
elements_data = self.mine_processing_time().sort_values(by='name')
elements_data = elements_data.to_dict('records')
else:
elements_data = self.default_values()
root = tk.Tk()
window = me.MainWindow(root, elements_data)
root.mainloop()
new_elements = pd.DataFrame(window.new_elements)
elements_data = pd.DataFrame(elements_data)
elements_data = new_elements.merge(
elements_data[['id', 'name', 'elementid']], on='id', how='left')
return elements_data
def default_values(self):
"""
Performs the mining of activities durations from data
Returns
-------
elements_data : Dataframe
"""
elements_data = list()
for task in self.tasks:
s_key = 'duration' if self.one_timestamp else 'processing_time'
task_processing = (
self.process_stats[
self.process_stats.task == task][s_key].tolist())
try:
mean_time = np.mean(task_processing) if task_processing else 0
except:
mean_time = 0
elements_data.append({'id': sup.gen_id(),
'type': 'EXPONENTIAL',
'name': task,
'mean': str(0),
'arg1': str(np.round(mean_time, 2)),
'arg2': str(0)})
elements_data = pd.DataFrame(elements_data)
elements_data = elements_data.merge(
self.model_data[['name', 'elementid']], on='name', how='left')
return elements_data.to_dict('records')
def add_start_end_info(self, elements_data):
# records creation
temp_elements_data = list()
default_record = {'type': 'FIXED',
'mean': '0', 'arg1': '0', 'arg2': '0'}
for task in ['Start', 'End']:
temp_elements_data.append({**{'id': sup.gen_id(), 'name': task},
**default_record})
        temp_elements_data = pd.DataFrame(temp_elements_data)
"""
Action selection
"""
import sys
import copy
from pyAudioAnalysis.audioBasicIO import read_audio_file, stereo_to_mono
import os
import pandas as pd
from random import sample, randint
import re
from pyAudioAnalysis.MidTermFeatures import mid_feature_extraction, beat_extraction
from pydub import AudioSegment
from plan2dance.Plan2Dance.common import AboutClass
from plan2dance.Plan2Dance.common import ProjectPath
"""
-----------------------------------------------------------------------------------------------------------------------
-------------------------------------------------Action selection------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------
Approach
1. First, predict the segment type for each part of the input audio.
2. Aggregate the type distribution of the predictions to make action selection easier.
3. Select actions according to the aggregated result and the action types in action_type.csv.
"""
class ActionSelect:
def __init__(self, ms):
self.ms = ms
self.action_config = ms.action_config
self.action_path = ms.action_path
self.config = self.ms.Config
self.dance_type = self.config["dance_type"]
self.segment = self.config['segment']
self.cluster_csv = os.path.join(ProjectPath, 'Data/Train/action_type.csv')
self.music_path = ms.music_path
        self.weight = int(self.config['type-weight'])  # interval of the type distribution
self.action_select = int(self.config['action-select']) # action num
self.action_time = {}
self.__set_action_type_csv()
self.temporary_dir_path = self._mkdir_new_dir()
self.low_action = self.__get_low_action()
self.ms.low_action = self.low_action
def _mkdir_new_dir(self):
"""
Make a new directory to save temporary file
"""
new_dir_name = self.music_path.split('.')[0]
if not os.path.exists(new_dir_name):
os.mkdir(new_dir_name)
return new_dir_name
def _get_music_beat_info(self, start, end):
"""
        Get the BPM and other beat information from the music.
"""
segment_path = os.path.join(self.temporary_dir_path, "temporary.wav")
audiofile = AudioSegment.from_file(self.music_path, 'mp3')
audiofile[start * 1000:end * 1000].export(
segment_path, format="wav")
[fs, x] = read_audio_file(segment_path)
        x = stereo_to_mono(x)  # convert a two-channel/stereo signal to mono; the channel count reflects the number of sources at recording time
mt_win = 1
mt_step = 2
st_win = 0.1
st_step = 0.1
try:
[_, st_features, _] = mid_feature_extraction(x, fs, round(mt_win * fs),
round(mt_step * fs),
round(fs * st_win),
round(fs * st_step))
[beat, beat_conf] = beat_extraction(st_features, st_step)
except:
print("beat特征提取错误")
beat, beat_conf = 0, 0
os.remove(segment_path)
return beat, beat_conf
def __get_low_action(self):
"""
        Get actions with short durations (less than one second).
"""
actions = os.listdir(self.action_path)
low_action = []
for action in actions:
cur_action = action.split('.')[0]
path = os.path.join(self.action_path, '{}.mtnx'.format(cur_action))
with open(path, 'r', encoding='utf-8') as f:
action_content = f.read()
frame = re.findall(r'frame="\d+"', action_content)[-1]
frame = re.findall(r'\d+', frame)[0]
time = round(float(frame) / 128, 3)
self.action_time[cur_action] = time
if time < 1:
low_action.append(cur_action)
return low_action
def __get_action_time(self, action_name):
"""
        Get the duration of an action.
        :param action_name: name of the action
:return:
"""
actions = os.listdir(self.action_path)
for action in actions:
cur_action = action.split('.')[0]
if action_name.lower() == cur_action.lower():
action_name = cur_action
break
if action_name in self.action_time.keys():
return self.action_time[action_name]
else:
path = os.path.join(self.action_path, '{}.mtnx'.format(action_name))
with open(path, 'r') as f:
action_content = f.read()
frame = re.findall(r'frame="\d+"', action_content)[-1]
frame = re.findall(r'\d+', frame)[0]
time = round(float(frame) / 128, 3)
self.action_time[action_name] = time
if time < 1:
self.low_action.append(action_name)
return time
def __get_type(self):
"""
        Get the list of cluster type names.
"""
if 'cluster-count' in self.config:
cluster_count = int(self.config['cluster-count'])
type_list = []
for i in range(cluster_count):
type_list.append('type_' + str(i))
return type_list
def __select_from_csv_by_type(self, type_select, ductive_time, action_select):
"""
        Select actions according to the given cluster type.
        :param type_select: cluster type index
        :return: (action frequency dict, comma-separated action string)
"""
if self.dance_type == "pop":
self.ms.action_type_csv['type-probability'] = self.ms.action_type_csv['probability'] * \
self.ms.action_type_csv[
"type_" + str(type_select)]
sort_csv = self.ms.action_type_csv.sort_values(by="type-probability", ascending=False).drop(
columns=['type-probability'])
value = 7
actions = [action for action in sort_csv[0:value]["action"]]
if ductive_time < 4.5:
num = 5
if ductive_time < 2.5:
low_action = self.ms.low_action
cur_sort_low_action = []
actions = [action for action in sort_csv["action"]] # All include the duration in [0.5,0.8]
for action in actions:
if action in low_action:
cur_sort_low_action.append(action)
actions = cur_sort_low_action
if len(actions) >= 3:
num = 3
else:
num = len(actions)
else:
num = randint(action_select, action_select + 1)
if self.segment == "one":
actions = [action for action in sort_csv["action"]]
cur_actions = copy.copy(actions)
else:
cur_actions = sample(actions, num)
            # 1. Treat the selected actions as high-frequency actions
            cur_actions = [v.lower() for v in cur_actions]  # convert to lower case
high_list = copy.copy(cur_actions)
dance_show_actions = [action.lower() for action in sort_csv["action"]]
            # 2. Actions that appear in existing dances but are not in group 1 are intermediate-frequency actions
            intermediate_list = list(set(dance_show_actions).difference(set(cur_actions)))  # set difference
cur_actions.extend(intermediate_list)
            # 3. Actions that do not appear in existing dances are low-frequency actions
            low_list = list(set([v.lower() for v in self.action_config.keys()]).difference(set(cur_actions)))  # set difference
cur_actions.extend(low_list)
action_str = str()
for action in cur_actions:
action_str += ("," + str(action))
            action_frequency = self.__get_action_frequency(high_list, intermediate_list, low_list)  # build the frequency dict
else:
            # Folk dance has no existing dance data,
            # so all actions start out as low-frequency data
high_list, intermediate_list = [], []
low_list = [v.lower() for v in self.action_config.keys()]
action_frequency = self.__get_action_frequency(high_list, intermediate_list, low_list)
action_str = str()
for action in low_list:
action_str += ("," + str(action))
        return action_frequency, action_str[1:]  # sample randomly picks actions from the list, randint picks a random number of actions
@staticmethod
def __get_action_frequency(high_list, intermediate_list, low_list):
"""
        Organize the action frequency labels.
"""
cur_dict = {}
for action in high_list:
cur_dict[action] = 'high-frequency'
for action in intermediate_list:
cur_dict[action] = "intermediate-frequency"
for action in low_list:
cur_dict[action] = "low-frequency"
return cur_dict
def __set_action_type_csv(self):
"""
        Read the action_type csv file and process the parameters in it.
:return:
"""
action_type_csv = pd.read_csv(self.cluster_csv)
        # Normalize the type columns
for action_type_select in self.__get_type():
action_type_csv[action_type_select] = round(
100 * (action_type_csv[action_type_select] / sum(action_type_csv[action_type_select])), 3)
self.ms.action_type_csv = action_type_csv
def __select_action(self, segment_result):
"""
        Apply some splitting to the aggregated type distribution, then select actions.
:return:
"""
col = ['start', 'end', 'durative', 'action', 'type', 'beat', 'beat_conf']
action_csv = pd.DataFrame([], columns=col)
frequency_dict = dict()
point = 0
for row in segment_result:
start_time = row[0]
end_time = row[1]
action_type = row[2]
duration_time = end_time - start_time
            # Get the beat information for each segment
beat, beat_conf = self._get_music_beat_info(start_time, end_time)
if duration_time > 18 and self.config['segment'] != 'one':
cur_duration_time = duration_time / 2
action_frequency, actions = self.__select_from_csv_by_type(action_type, cur_duration_time,
self.action_select)
arr = [start_time, end_time - cur_duration_time, cur_duration_time, actions, action_type, beat,
                       beat_conf]  # times, action sequence, type
current_csv = pd.DataFrame([arr], columns=col)
action_csv = action_csv.append(current_csv, ignore_index=True)
                # two values (the long segment is split into two halves)
frequency_dict[point] = action_frequency
point += 1
action_frequency, actions = self.__select_from_csv_by_type(action_type, cur_duration_time,
self.action_select)
arr = [start_time + cur_duration_time, end_time, cur_duration_time, actions, action_type, beat,
                       beat_conf]  # times, action sequence, type
current_csv = pd.DataFrame([arr], columns=col)
action_csv = action_csv.append(current_csv, ignore_index=True)
frequency_dict[point] = action_frequency
else:
action_frequency, actions = self.__select_from_csv_by_type(action_type, duration_time,
self.action_select)
                arr = [start_time, end_time, duration_time, actions, action_type, beat, beat_conf]  # times, action sequence, type
current_csv = pd.DataFrame([arr], columns=col)
action_csv = action_csv.append(current_csv, ignore_index=True)
                # return the action list
frequency_dict[point] = action_frequency
point += 1
return frequency_dict, action_csv
def __select_action_folk_dance(self, segment_result):
"""
        Apply some splitting to the aggregated type distribution, then select actions.
:return:
"""
col = ['start', 'end', 'durative', 'action', 'type', 'beat', 'beat_conf']
action_csv = pd.DataFrame([], columns=col)
frequency_dict = dict()
point = 0
for row in segment_result:
start_time = row[0]
end_time = row[1]
action_type = row[2]
duration_time = end_time - start_time
# 分段获取每段的节拍信息
beat, beat_conf = self._get_music_beat_info(start_time, end_time)
if duration_time > 18 and self.config['segment'] != 'one':
cur_duration_time = duration_time / 2
action_frequency, actions = self.__select_from_csv_by_type(action_type, cur_duration_time,
self.action_select)
arr = [start_time, end_time - cur_duration_time, cur_duration_time, actions, action_type, beat,
                       beat_conf]  # times, action sequence, type
                current_csv = pd.DataFrame([arr], columns=col)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `wkr.pd` package."""
import pandas as pd
import pytest
from wkr.pd import pandas_memoize
def dataframe_gen():
"""Generate a number of DataFrames."""
d = {
"one": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"two": pd.Series([1.0, 2.0, 3.0, 4.0], index=["a", "b", "c", "d"]),
}
yield pd.DataFrame(d, index=["d", "b", "a"])
df = pd.DataFrame(d, index=["d", "b", "a"], columns=["two", "three"])
df["three"] = pd.to_numeric(df["three"])
yield df
yield pd.DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]},
index=["a", "b", "c", "d"],
)
yield pd.DataFrame([{"a": 1, "b": 2}, {"a": 5, "b": 10, "c": 20}])
def test_memoize_pandas_save(tmpdir):
"""Test that `wkr.pd.memoize_pandas` saves to CSV."""
for idx, df in enumerate(dataframe_gen()):
filename = tmpdir.join("data{}.csv".format(idx))
@pandas_memoize(filename.strpath)
def f():
return df
assert not filename.exists()
df2 = f()
assert df2.equals(df)
assert filename.exists()
df3 = pd.read_csv(filename.strpath, encoding="utf-8", index_col=0)
assert df3.equals(df)
def test_memoize_pandas_load(tmpdir):
"""Test that `wkr.pd.memoize_pandas` loads from CSV."""
for idx, df in enumerate(dataframe_gen()):
filename = tmpdir.join("data{}.csv".format(idx))
# define a memoized function that returns a known value
@pandas_memoize(filename.strpath)
def f():
return df
# now write to its CSV file a value that is different
df2 = pd.DataFrame(
{"x": list(range(40, 30, -1)), "y": list(range(20, 30))},
index=list(range(5, 15)),
)
assert not df2.equals(df)
df2.to_csv(filename.strpath, encoding="utf-8")
assert filename.exists()
# show that f() now returns df2, not df
df3 = f()
assert not df3.equals(df)
assert df3.equals(df2)
# remove the CSV file
filename.remove()
@pytest.mark.skip(reason="fails on travis CI")
def test_memoize_pandas_parse_dates(tmpdir):
"""Test `wkr.pd.memoize_pandas` on loading datetimes."""
filename = tmpdir.join("data.csv")
@pandas_memoize(filename.strpath)
def f():
return pd.DataFrame(
list(range(72)),
columns=["count"],
index=pd.date_range("1/1/2011", periods=72, freq="H"),
)
assert not filename.exists()
df = f()
assert filename.exists()
df2 = f()
# DataFrames are equal even if their types aren't the same
assert df2.equals(df)
assert df2.index.dtype != df.index.dtype
@pandas_memoize(filename.strpath, parse_dates=True)
def f():
return pd.DataFrame(
list(range(72)),
columns=["count"],
            index=pd.date_range("1/1/2011", periods=72, freq="H"),
        )
from pathlib import Path
import numpy as np
import pandas as pd
from src import utils
class ARCPData():
# american red cross preparedness data
def __init__(self, ACS, file_name = 'ARC Preparedness Data.csv' ):
self.data = None
self.file_name = utils.DATA['master'] / file_name
self.Load()
self.standardizeColumnNames(ACS)
def Load(self):
self.data = pd.read_csv(self.file_name)
def standardizeColumnNames(self, ACS):
"""
Standardizes column names
"""
df = self.data
df.columns = map(str.lower, df.columns)
df.columns = df.columns.str.replace(', ', '_')
df.columns = df.columns.str.replace('-', '_')
df.columns = df.columns.str.replace('/', '_')
df.columns = df.columns.str.replace('(', '_')
df.columns = df.columns.str.replace(')', '_')
df.columns = df.columns.str.replace(' ', '_')
df.dropna(inplace = True)
        # trim the leading safety marks from geoid
df['geoid'] = df['geoid'].str[2:]
df = df[df['geoid'].isin(ACS.tot_pop.index)]
self.data = df
class ACSData():
# TODO: typechecking
def __init__(self,year = 2016,level = 'block_group', pop_thresh = 0):
self.file_name = utils.DATA['acs'] / "acs_{}_data.csv".format(year)
self.level = level
self.data = None
self.tot_pop = None
self.pop_thresh = pop_thresh
self.Load()
self.Clean(self.data)
self.Munge(self.data,self.tot_pop, self.pop_thresh, self.level)
def Load(self):
self.data = pd.read_csv(self.file_name, dtype = {'GEOID':'object'}, index_col = 1)
def Clean(self,ACS):
## Cleans ACS data
# 'ACS' - ACS variable from LoadACS
# 'self.level' - geography level to munge the data to
# levels can be found in utils.GEOID
        # Note: this function can only aggregate data
# Ensures GEOID variable is in the correct format and sets it as the dataframe index
ACS.reset_index(inplace = True)
ACS['GEOID'] = ACS['geoid'].str[2:]
ACS.set_index(['GEOID'],inplace = True)
ACS.drop('geoid','columns',inplace =True)
# Removes extraneous features (i.e. non-numeric) in the dataframe
if 'Unnamed: 0' in ACS.columns:
ACS.drop('Unnamed: 0','columns',inplace= True)
if 'NAME' in ACS.columns:
ACS.drop('NAME','columns',inplace= True)
#if 'inc_pcincome' in ACS.columns:
# ACS.drop('inc_pcincome','columns',inplace= True)
self.tot_pop = ACS[['tot_population']].groupby('GEOID').sum()
# Drop all total count columns in ACS and keeps all percentage columns
#cols = ACS.columns.to_list()
#print(cols)
#for col in cols:
# if col.find('tot') != -1 :
# print(col)
# ACS.drop(col,'columns', inplace = True)
# Remove missing values from dataframe
ACS.replace([np.inf, -np.inf], np.nan,inplace = True)
#ACS.dropna(inplace = True)
self.data = ACS
def Munge(self,ACS,tot_pop, pop_thresh,level='block_group'):
## ACS Munging
#ACS.drop(ACS.loc[:, 'state':'in_poverty'], inplace = True, axis = 1)
#print(ACS.columns)
#education adjustment
ACS['educ_less_12th'] = ACS.loc[:,'educ_nursery_4th':'educ_12th_no_diploma'].sum(axis =1 )
ACS['educ_high_school'] = ACS.loc[:,'educ_high_school_grad':'educ_some_col_no_grad'].sum(axis =1 )
ACS.drop(ACS.loc[:, 'educ_nursery_4th':'educ_some_col_no_grad'], inplace = True, axis = 1)
# house age adjustment
ACS['house_yr_pct_before_1960'] =ACS.loc[:,'house_yr_pct_1950_1959':'house_yr_pct_earlier_1939'].sum(axis =1 )
ACS['house_yr_pct_after_2000'] = ACS.loc[:, 'house_yr_pct_2014_plus':'house_yr_pct_2000_2009'].sum(axis = 1 )
ACS['house_yr_pct_1960_2000'] = ACS.loc[:, 'house_yr_pct_1990_1999':'house_yr_pct_1960_1969'].sum(axis = 1 )
ACS.drop(ACS.loc[:, 'house_yr_pct_2014_plus':'house_yr_pct_earlier_1939'], inplace = True, axis = 1)
# housing Price adjustment
ACS['house_val_less_50K']=ACS.loc[:,'house_val_less_10K':'house_val_40K_50K'].sum(axis =1 )
ACS['house_val_50_100K']=ACS.loc[:,'house_val_50K_60K':'house_val_90K_100K'].sum(axis =1 )
ACS['house_val_100K_300K']=ACS.loc[:,'house_val_100K_125K':'house_val_250K_300K'].sum(axis =1 )
ACS['house_val_300K_500K']=ACS.loc[:,'house_val_300K_400K':'house_val_400K_500K'].sum(axis =1 )
ACS['house_val_more_500K'] = ACS.loc[:,'house_val_500K_750K':'house_val_more_2M'].sum(axis = 1)
ACS.drop(ACS.loc[:, 'house_val_less_10K':'house_val_more_2M'], inplace = True, axis = 1)
ACS['race_pct_black_or_amind'] = ACS.loc[:,'race_pct_black'] \
+ ACS.loc[:,'race_pct_amind']
ACS['pct_alt_heat'] = ACS.loc[:,'heat_pct_fueloil_kerosene'] \
+ ACS.loc[:,'heat_pct_coal'] \
+ ACS.loc[:,'heat_pct_wood'] \
+ ACS.loc[:,'heat_pct_bottled_tank_lpgas']
#print(ACS.columns)
self.data = ACS
# munge to appropriate level
if self.level =='block_group':
#ACS data already at block_group level
self.tot_pop = tot_pop
else:
Data = self.data
Data = Data.multiply(tot_pop['tot_population'],axis= 'index')
Data.index , tot_pop.index = Data.index.str[0:utils.GEOID[level]], \
tot_pop.index.str[0:utils.GEOID[level]]
Data, tot_pop = Data.groupby(Data.index).sum(), \
tot_pop.groupby(tot_pop.index).sum()
self.data = Data.divide(tot_pop['tot_population'],axis = 'index')
self.tot_pop = tot_pop
#only get geoids with population greater than user defined value
self.tot_pop = self.tot_pop[self.tot_pop['tot_population']>=self.pop_thresh]
self.data = self.data[self.data.index.isin(self.tot_pop.index)]
class SVIData():
# TODO: typechecking
    # level and year are fixed
def __init__(self,ACS):
self.file_name = utils.DATA['svi'] / "SVI Tract Data.csv"
self.data = None
self.Load()
self.Clean(ACS)
def Load(self):
self.data = pd.read_csv(self.file_name, encoding='ISO-8859-1')
self.data['Tract'] = self.data['GEOID'].str[2:]
def Clean(self, ACS):
ACS['Tract'] = ACS.index.str[:-1]
ACS['geos'] = ACS.index
merged = ACS.merge(self.data, how = 'left', left_on = 'Tract' , right_on ='Tract')
merged.set_index('geos', inplace=True)
cols = ['inc_pct_poverty','RPL_THEME1', 'RPL_THEME2', 'RPL_THEME3','RPL_THEME4']
self.data = merged[cols]
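# A compact sketch of the population-weighted roll-up performed in ACSData.Munge:
# per-block-group rates are converted back to counts, summed over the coarser GEOID
# prefix, then re-normalised. The GEOIDs and values below are made up for illustration.
def _example_weighted_rollup():
    rates = pd.DataFrame({'pct_poverty': [0.10, 0.30]},
                         index=['480010201001', '480010201002'])
    pop = pd.DataFrame({'tot_population': [1000, 3000]}, index=rates.index)
    counts = rates.multiply(pop['tot_population'], axis='index')
    tract = counts.index.str[0:11]
    rolled = counts.groupby(tract).sum().divide(
        pop.groupby(pop.index.str[0:11])['tot_population'].sum(), axis='index')
    # (0.10 * 1000 + 0.30 * 3000) / 4000 = 0.25 at the tract level
    return rolled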
class NFIRSData():
def __init__(self,level,tot_pop,pop_thresh = 0, sev=False, min_loss = 10000):
self.file_name = utils.DATA['master'] /'NFIRS Fire Incident Data.csv'
self.tot_pop = tot_pop
self.level = level
self.severeFiresOnly = sev
self.pop_thresh = pop_thresh
self.data = None
self.fires = None
self.top10 = None
self.severeFire = None
self.min_loss = min_loss
self.Load()
# self.Clean(self.data)
# munge to appropriate level
self.Munge(self.data, self.tot_pop,self.level, self.min_loss, self.pop_thresh)
def set_sev_loss(self, min_loss):
self.min_loss = min_loss
nfirs = self.data
nfirs['severe_fire'] = 'not_sev_fire'
sev_fire_mask = (nfirs['oth_death'] > 0) | (nfirs['oth_inj'] > 0) | (nfirs['tot_loss'] >= self.min_loss) | (nfirs['tot_units_affected'] > 1)
nfirs.loc[sev_fire_mask,'severe_fire'] = 'sev_fire'
nfirs['min_loss'] = np.where(nfirs['tot_loss']>=self.min_loss,'had_min_loss','no_min_loss')
self.data = nfirs
return
def Load(self):
cols_to_use = ['state','fdid','inc_date','oth_inj','oth_death','prop_loss',
'cont_loss','tot_loss','tot_units_affected','geoid']
# Specify particular data type for geoid column
col_dtypes = {'geoid':str}
# utils.DATA['master'] / self.file_name
#Read in NFIRS dataframe
Data_path = self.file_name
Data = pd.read_csv(Data_path,
dtype = col_dtypes,
usecols = cols_to_use,
encoding='latin-1')
self.data = Data
def Munge(self, nfirs, tot_pop, level, min_loss, pop_thresh):
#NFIRS Munging
#Convert inc_date column values to python datetime type
nfirs['inc_date'] = | pd.to_datetime(nfirs['inc_date'], infer_datetime_format=True) | pandas.to_datetime |
# Exercise 3: Fitting k-Means Model and Assigning Predictions
# import data
import pandas as pd
df = | pd.read_csv('glass.csv') | pandas.read_csv |
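# A minimal sketch of the "fit k-means and assign predictions" step promised by
# the exercise heading above. It assumes scikit-learn is available; the feature
# selection, the scaler, and n_clusters=3 are illustrative assumptions rather
# than values taken from the original exercise.
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
features = df.select_dtypes(include='number')      # assumed feature set from glass.csv
scaled = StandardScaler().fit_transform(features)   # standardize before clustering
model = KMeans(n_clusters=3, random_state=42)       # assumed number of clusters
df['cluster'] = model.fit_predict(scaled)           # assign each row its cluster label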
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s2.dtype, object)
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
self.assertTrue(td.__add__(other) is NotImplemented)
self.assertTrue(td.__sub__(other) is NotImplemented)
self.assertTrue(td.__truediv__(other) is NotImplemented)
self.assertTrue(td.__mul__(other) is NotImplemented)
self.assertTrue(td.__floordiv__(td) is NotImplemented)
def test_ops_error_str(self):
# GH 13624
tdi = TimedeltaIndex(['1 day', '2 days'])
for l, r in [(tdi, 'a'), ('a', tdi)]:
with tm.assertRaises(TypeError):
l + r
with tm.assertRaises(TypeError):
l > r
with tm.assertRaises(TypeError):
l == r
with tm.assertRaises(TypeError):
l != r
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
for i in range(10)])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
self.assertEqual(result, expected)
result = td.to_frame().mean()
self.assertEqual(result[0], expected)
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600, 'ms'))
self.assertEqual(result, expected)
result = td.median()
expected = to_timedelta('00:00:09')
self.assertEqual(result, expected)
result = td.to_frame().median()
self.assertEqual(result[0], expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
self.assertEqual(result, expected)
result = td.to_frame().sum()
self.assertEqual(result[0], expected)
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
self.assertEqual(result, expected)
result = td.to_frame().std()
self.assertEqual(result[0], expected)
# invalid ops
for op in ['skew', 'kurt', 'sem', 'prod']:
self.assertRaises(TypeError, getattr(td, op))
# GH 10040
# make sure NaT is properly handled by median()
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
self.assertEqual(s.diff().median(), timedelta(days=4))
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
Timestamp('2015-02-15')])
self.assertEqual(s.diff().median(), timedelta(days=6))
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1, seconds=10),
np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
| pd.offsets.Day() | pandas.offsets.Day |
import json
import pandas as pd
from datetime import datetime
class Evento:
def __init__(self, json_evento=''):
if not json_evento:
self.id = ''
self.category = {}
else:
self.id = json.dumps(json_evento['id']).replace('"','')
self.category = []
json_category = json_evento["category"]
i = 0
for json_categoria in json_category:
categoria = json_categoria.strip('"')
self.category.append(categoria)
i += 1
def to_series(self):
dict_atributos = {}
for nome_categoria in self.category:
dict_atributos[nome_categoria+'_evnt'] = True
# print('list of attributes that will become a series', dict_atributos)
return | pd.Series(data=dict_atributos) | pandas.Series |
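# A small usage sketch for the Evento class above; the input dict is an assumed
# shape inferred from the attribute accesses in __init__, not a real payload.
sample_json = {"id": 42, "category": ["music", "sports"]}
event = Evento(sample_json)
flags = event.to_series()                    # Series of '<category>_evnt' -> True
table = pd.DataFrame([flags]).fillna(False)  # one-row frame of boolean category flags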
"""
Functions for loading the Assistments data. Originally from
https://sites.google.com/site/assistmentsdata/home/assistment-2009-2010-data/skill-builder-data-2009-2010
"""
import pickle
import logging
import numpy as np
import pandas as pd
from .constants import (ITEM_IDX_KEY, TEMPLATE_IDX_KEY, CONCEPT_IDX_KEY, USER_IDX_KEY,
TIME_IDX_KEY, CORRECT_KEY, SINGLE)
SKILL_ID_KEY = 'skill_id'
PROBLEM_ID_KEY = 'problem_id'
TEMPLATE_ID_KEY = 'template_id'
USER_ID_KEY = 'user_id'
LOGGER = logging.getLogger(__name__)
def load_data(file_path, item_id_col=SKILL_ID_KEY, template_id_col=None, concept_id_col=None,
remove_nan_skill_ids=False, max_interactions_per_user=None,
drop_duplicates=False, min_interactions_per_user=2):
""" Load the Assistments dataset as a pandas dataframe, filter out students with only a single
interaction, and optionally truncate student histories. The columns used for item and concept
identifiers can be specified in the input arguments.
Note that when multiple skill ids are associated with an interaction, only the
lexicographically first skill name is retained.
:param str file_path: path to the skill builder file
:param str item_id_col: indicates column of csv file to use for item ids
:param str template_id_col: Set a particular column to represent a template id for hierarchical
IRT. If 'single', assumes a dummy single hierarchical level; if None, no column is retained
for templates.
:param str concept_id_col: indicates column of csv file to use for concept ids. If 'single',
assumes a dummy single concept. If None, concept column is not retained.
:param bool remove_nan_skill_ids: whether to filter out interactions with NaN skill ids
:param int max_interactions_per_user: number of interactions to keep per user (default is to
keep all)
:param int min_interactions_per_user: The minimum number of interactions required to retain a
student's history.
:param bool drop_duplicates: Whether to keep only the first of rows with duplicate order_id
fields
:return: processed data, student ids corresponding to the student indices, item ids
corresponding to the item indices, template ids corresponding to the template indices, and
concept ids corresponding to the concept indices
:rtype: (pd.DataFrame, np.ndarray[int], np.ndarray[int], np.ndarray[int])
"""
data = | pd.DataFrame.from_csv(file_path) | pandas.DataFrame.from_csv |
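# A hypothetical call to the loader above, illustrating the documented options;
# the CSV path and the column choices are assumptions, and the result is left
# unpacked because only part of the function body is shown here.
result = load_data('skill_builder_data.csv', item_id_col=PROBLEM_ID_KEY,
concept_id_col=SKILL_ID_KEY, remove_nan_skill_ids=True,
max_interactions_per_user=200)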
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
# valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
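# Hedged sketch (not part of the original suite) of the hash-partitioning
# invariant checked above: a row's partition depends only on the hash of its
# key columns (hash % nparts), so identical keys can never be split across
# partitions. Python's built-in hash() is used purely for illustration and
# is not cudf's hash function.
def _example_hash_partition_idea():
    keys = [(0,), (1,), (0,), (2,), (1,)]
    nparts = 2
    parts = [set() for _ in range(nparts)]
    for key in keys:
        parts[hash(key) % nparts].add(key)
    # each distinct key lands in exactly one partition
    assert sum(len(p) for p in parts) == len(set(keys))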
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
    if (dtype1 != dtype2 and "datetime" in dtype1) or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
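# Small sketch (not part of the original suite) of the pandas behaviour that
# check_dtype=False above works around: concatenation that introduces missing
# values upcasts integer columns to float64.
def _example_concat_upcasts_int():
    left = pd.DataFrame({"a": [1, 2]})
    right = pd.DataFrame({"b": [1.0]})
    combined = pd.concat([left, right], sort=False)
    assert combined["a"].dtype == np.dtype("float64")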
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # to compare against pandas, cast the numerical columns of the cudf
    # result to float64 so that missing values show up as NaN
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
    # PyArrow's to_pandas() yields a numpy-backed result, which has better
    # dtype compatibility for this comparison than the original pandas column
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
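# Minimal sketch (not part of the original suite) of the validity-bitmap
# layout compared above: Arrow packs null flags least-significant-bit first,
# so the pattern [valid, null, valid, null, valid] becomes the single byte
# 0b00010101.
def _example_arrow_validity_bitmap():
    arr = pa.array([1, None, 3, None, 5])
    first_byte = np.asarray(arr.buffers()[0]).view("u1")[0]
    assert first_byte == 0b00010101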
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
gdf.quantile(q, numeric_only=False),
)
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash of a string name is not stable across runs, which can
    # occasionally make enc_with_name_arr equal to enc_arr, and there is no
    # better way to force a stable value. Use an integer name so hashing it
    # always yields the same value.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
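# Hedged sketch (not part of the original suite) of the feature-hashing idea
# exercised above: values are hashed into a fixed number of buckets, so every
# encoded value falls in [0, num_features). Python's hash() here is only
# illustrative and is not the hash function cudf uses.
def _example_feature_hashing_idea():
    num_features = 1000
    values = [0, 1, 2, 123456789]
    buckets = [hash(v) % num_features for v in values]
    assert all(0 <= b < num_features for b in buckets)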
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
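# Short sketch (not part of the original suite) of the numpy semantics that
# test_series_digitize matches: right=False places x in bin i when
# bins[i-1] <= x < bins[i], while right=True uses bins[i-1] < x <= bins[i].
def _example_digitize_right_flag():
    bins = np.array([2, 5, 8])
    assert list(np.digitize([3, 5, 8], bins, right=False)) == [1, 2, 3]
    assert list(np.digitize([3, 5, 8], bins, right=True)) == [1, 1, 2]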
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Typecast the pandas Series because a pure-None list yields `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
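# Small sketch (not part of the original suite) of why test_series_all_null
# typecasts the pandas side: a pure-None list produces an object-dtype Series
# unless a dtype is forced, whereas forcing float64 turns the Nones into NaN.
def _example_none_gives_object_dtype():
    assert pd.Series([None, None]).dtype == np.dtype("object")
    assert pd.Series([None, None], dtype="float64").isna().all()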
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
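# Small sketch (not part of the original suite) of the pandas behaviour noted
# in the TODO above: reindexing onto labels that were not present fills with
# NaN, which upcasts an int64 Series to float64.
def _example_reindex_upcasts_int():
    s = pd.Series([1, 2, 3])
    assert s.reindex([0, 1, 5]).dtype == np.dtype("float64")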
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(pdf_new_name, gdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_mulitindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas only supports ignore_index for sort_index from v1.0, so emulate
    # it below by resetting the index on the expected result
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
    # Work around a bug in Arrow 0.14.1 where NaNs aren't handled: compare as int64
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
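# Minimal sketch (not part of the original suite) of the pyarrow flag driving
# test_series_list_nanasnull: from_pandas=True treats NaN as a null, while
# from_pandas=False keeps it as an ordinary floating-point NaN value.
def _example_pyarrow_from_pandas_flag():
    data = [1.0, np.nan]
    assert pa.array(data, from_pandas=True).null_count == 1
    assert pa.array(data, from_pandas=False).null_count == 0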
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
for c in gdf.columns:
np.array_equal(gdf[c].nullmask.to_array(), result[c].to_array())
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
index = list(i for i in range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
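# The frame's total __sizeof__ is expected to equal the sum of its column sizes plus the index size.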
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
| pd.Series(["0", "12", "14"]) | pandas.Series |
# statiz_batter_crawler.py
# for chrome ver.97
# coded by <NAME>
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from html_table_parser import parser_functions
import pandas as pd
import sys
import io
# Prevent Korean text from being garbled in console output
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')
# Load
driver = webdriver.Chrome(r"C:\chromedriver.exe")
driver.implicitly_wait(5)
url = 'http://www.statiz.co.kr/stat.php?re=1&lr='
# Dataframe list
df_list = []
# lists for birth
ds_pit_names_list=[]
ds_pit_birth_list=[]
driver.get(url)
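# Scrape loop: on each results page, parse the fixed-column player links for names and
# birth dates and the main stats table into a DataFrame; advancing to the next page and
# breaking out of the loop is presumably handled further down in this script.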
while True:
# Get table
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
table = soup.find_all("table")
ds_pit_names=soup.select('#fixcol > table > tbody > tr > td > a')
for ds_pit_name in ds_pit_names:
ds_pit_names_list.append(ds_pit_name.get_text())
ds_pit_birth_list.append(ds_pit_name['href'][-10:])
# Parsing
p = parser_functions.make2d(table[1])
# Set DataFrame
df_temp = | pd.DataFrame(p[2:], columns=p[0]) | pandas.DataFrame |
"""
Testing model.py module
"""
from unittest import TestCase
import numpy as np
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_index_equal, \
assert_frame_equal
from forecast_box.model import *
# TODO: Check forward steps <= 0
class ExampleModel(Model):
def __init__(self, forward_steps, ar_order, **kwargs):
Model.__init__(self, forward_steps, ar_order, **kwargs)
def _train_once(self, y_train, X_train):
return {'theta': 123}
def _predict_once(self, X_test, forward_step):
return pd.Series(data=[9, 9, 9],
index= | pd.date_range('2000-01-03', periods=3) | pandas.date_range |
import os
import sys
import time
import logging
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import backend as K
from tensorflow.keras.metrics import binary_crossentropy, mean_squared_error
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path)
import candle
DATA_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/Examples/rnagen/'
logger = logging.getLogger(__name__)
additional_definitions = [
{'name': 'latent_dim', 'type': int, 'default': 10,
'help': "latent dimensions"},
{'name': 'model', 'default': 'cvae',
'help': 'generator model to use: ae, vae, cvae'},
{'name': 'top_k_types', 'type': int, 'default': 20,
'help': 'number of top sample types to use'},
{'name': 'n_samples', 'type': int, 'default': 10000,
'help': 'number of RNAseq samples to generate'},
{'name': 'plot', 'type': candle.str2bool,
'help': 'plot test performance comparison with and without synthetic training data'}
]
required = ['latent_dim', 'model', 'top_k_types', 'n_samples', 'plot']
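# additional_definitions declares the benchmark-specific parameters on top of the common
# CANDLE ones, and 'required' lists the keys that must be set (via the default model file
# or the command line) before the run starts.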
class BenchmarkRNAGen(candle.Benchmark):
def set_locals(self):
"""Functionality to set variables specific for the benchmark
- required: set of required parameters for the benchmark.
- additional_definitions: list of dictionaries describing the additional parameters for the
benchmark.
"""
if required is not None:
self.required = set(required)
if additional_definitions is not None:
self.additional_definitions = additional_definitions
def initialize_parameters(default_model='rnagen_default_model.txt'):
# Build benchmark object
rnagenBmk = BenchmarkRNAGen(file_path, default_model, 'keras',
prog='rnagen_baseline',
desc='RNAseq generator')
# Initialize parameters
gParameters = candle.finalize_parameters(rnagenBmk)
# logger.info('Params: {}'.format(gParameters))
return gParameters
def get_file(url):
fname = os.path.basename(url)
return candle.get_file(fname, origin=url, cache_subdir='Examples')
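# Usage sketch (hypothetical filename): get_file(DATA_URL + 'combined_rnaseq_data') would
# download the file into the CANDLE cache under 'Examples' and return its local path.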
def impute_and_scale(df, scaling='std', imputing='mean', dropna='all'):
"""Impute missing values with mean and scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to impute and scale
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
if dropna:
df = df.dropna(axis=1, how=dropna)
else:
empty_cols = df.columns[df.notnull().sum() == 0]
df[empty_cols] = 0
if imputing is None or imputing.lower() == 'none':
mat = df.values
else:
imputer = SimpleImputer(strategy=imputing)
mat = imputer.fit_transform(df)
if scaling is None or scaling.lower() == 'none':
return | pd.DataFrame(mat, columns=df.columns) | pandas.DataFrame |
from fuzzywuzzy import fuzz
import string
import pandas as pd
import random
import datetime
from annoy import AnnoyIndex
import numpy as np
from pandas.api.types import is_numeric_dtype
### UTILITY Methods
# from importutil import reload
#sys.path.append('c:\\users\\pkapaleeswaran\\workspacej3\\py')
class PyFrame:
x = 7
def __init__(self, cache):
self.cache = cache
@classmethod
def makefm(cls, frame):
cache = {}
cache['data']=frame
cache['version'] = 0
cache['low_version'] = 0
cache[0] = frame
return cls(cache)
@classmethod
def makefm_csv(cls, fileName):
frame = pd.read_csv(fileName)
cache = {}
cache['data']=frame
cache['version'] = 0
cache['low_version'] = 0
cache[0] = frame
return cls(cache)
def id_generator(self, size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
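# The cache works as a simple undo stack: frame snapshots are stored under integer keys,
# while 'version', 'high_version' and 'low_version' track the current, newest and oldest
# snapshot respectively (see makeCopy/fallback below).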
def makeCopy(this):
version = 0
if(version in this.cache):
if('high_version' in this.cache):
version = this.cache['high_version']
else:
version = this.cache['version']
version = version+1
this.cache[version] = this.cache['data'].copy()
this.cache['version'] = version
this.cache['high_version'] = version
# falls back to the last version
def fallback(this):
version = this.cache['version']
low_version = this.cache['low_version']
if(version in this.cache and version > low_version):
this.cache['high_version'] = version
version = version - 1
this.cache['version'] = version
this.cache['data'] = this.cache[version]
else:
print ("can't fall back; already at the earliest version")
def calcRatio(self, actual_col, predicted_col):
result = []
#actual_col = actual_col.unique
#predicted_col = predicted_col.unique
for x in actual_col:
for y in predicted_col:
ratio = fuzz.ratio(x,y)
ratio = 1 - (ratio / 100)
if(ratio != 0):
data = [x,y,ratio]
result.append(data)
result = pd.DataFrame(result)
return result
def match(this, actual_col, predicted_col):
cache = this.cache
key_name = actual_col + "_" + predicted_col
if( not(key_name in cache)):
print ('building cache', key_name)
daFrame = cache['data']
var_name = this.calcRatio(daFrame[actual_col].unique(), daFrame[predicted_col].unique())
var_name.columns = ['col1','col2','distance']
cache[key_name] = var_name
var_name = cache[key_name]
#print(var_name.head())
# seems like we dont need that right now
#output = var_name[(var_name[2] > threshold) & (var_name[2] != 100)]
return var_name
def drop_col(this, col_name, inplace=True):
frame = this.cache['data']
if not inplace:
this.makeCopy()
frame.drop(col_name, axis =1, inplace=True)
this.cache['data'] = frame
def split(this, col_name, delim, inplace=True):
frame = this.cache['data']
if not inplace:
this.makeCopy()
col_to_split = frame[col_name]
splitcol = col_to_split.str.split(delim, expand = True)
for part in splitcol:
frame[col_name + '_' + str(part)] = splitcol[part]
this.cache['data'] = frame
return frame
def replace_val(this, col_name, old_value, new_value, regx=True, inplace=True):
frame = this.cache['data']
if not inplace:
this.makeCopy()
nf = frame.replace({col_name: old_value}, {col_name: new_value}, regex=regx, inplace=inplace)
print('replacing inplace')
#this.cache['data'] = nf
def replace_val2(this, col_name, cur_value, new_col, new_value, regx= True, inplace=True):
frame = this.cache['data']
if not inplace:
this.makeCopy()
nf = frame.replace({col_name: cur_value}, {new_col: new_value}, regex=regx, inplace=inplace)
print('replacing inplace')
def upper(this, col_name, inplace=True):
if not inplace:
this.makeCopy()
frame = this.cache['data']
column = frame[col_name]
frame[col_name] = column.str.upper()
this.cache['data'] = frame
def lower(this, col_name, inplace=True):
if not inplace:
this.makeCopy()
frame = this.cache['data']
column = frame[col_name]
frame[col_name] = column.str.lower()
this.cache['data'] = frame
def title(this, col_name, inplace=True):
if not inplace:
this.makeCopy()
frame = this.cache['data']
column = frame[col_name]
frame[col_name] = column.str.title()
this.cache['data'] = frame
def concat(this, col1, col2, newcol, glue='_', inplace=True):
if not inplace:
this.makeCopy()
frame = this.cache['data']
frame[newcol] = frame[col1] + glue + frame[col2]
this.cache['data'] = frame
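# Example (hypothetical column names): concat('first_name', 'last_name', 'full_name', glue=' ')
# builds a new 'full_name' column by joining the two source columns with a space.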
def mathcat(this, operation, newcol, inplace=True):
if not inplace:
this.makeCopy()
frame = this.cache['data']
frame[newcol] = operation
this.cache['data'] = frame
def dupecol(this, oldcol, newcol, inplace=True):
if not inplace:
this.makeCopy()
frame = this.cache['data']
frame[newcol] = frame[oldcol]
this.cache['data'] = frame
# dropping row has to be done from inside java
# newframe = mv[mv['Genre'] != 'Drama']
# change type is also done from java
# The actual type is sent from java
def change_col_type(this, col, type, inplace=True):
frame = this.cache['data']
if not inplace:
this.makeCopy()
frame[col] = frame[col].astype(type)
this.cache['data'] = frame
# Index in euclidean space, etc.
# input is a pandas frame
# the first column is typically the identifier
# The remaining are the vector
def buildnn(this, trees=10, type='euclidean'):
frame = this.cache['data']
cols = len(frame.columns) - 1
t = AnnoyIndex(cols, type)
for i, row in frame.iterrows():
t.add_item(i, row[1:].tolist())
t.build(trees)
this.cache['nn'] = t
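# Usage sketch: once built, the Annoy index answers nearest-neighbour queries, e.g.
# this.cache['nn'].get_nns_by_item(0, 5) returns the ids of the 5 rows closest to row 0
# under the chosen metric.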
# drops non numeric data columns from the frame
def dropalpha(this, inplace=True):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
df = this.cache['data']
if not inplace:
this.makeCopy()
this.cache['data'] = df.select_dtypes(include=numerics)
# drops non numeric data columns from the frame
def dropnum(this, inplace=True):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
df = this.cache['data']
if not inplace:
this.makeCopy()
this.cache['data'] = df.select_dtypes(exclude=numerics)
def extract_num(this, col_name, newcol='assign', inplace=True):
if(newcol == 'assign'):
this.replace_val(col_name, '[a-zA-Z]+', '')
else:
this.dupecol(col_name, newcol)
this.replace_val(newcol, '[a-zA-Z]+', '')
def extract_alpha(this, col_name, newcol='assign', inplace=True):
if(newcol == 'assign'):
this.replace_val(col_name, r'\d+', '')
else:
this.dupecol(col_name, newcol)
this.replace_val(newcol, r'\d+', '')
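# Example (hypothetical column names): extract_num('sku', 'sku_num') copies the column and
# strips letters ('abc123' -> '123'), while extract_alpha strips digits ('abc123' -> 'abc').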
def unpivot(this, valcols, idcols=['all'], inplace=True):
frame = this.cache['data']
if not inplace:
this.makeCopy()
# assimilate all the columns if the idcols = 'all'
if idcols == ['all']:
idcols = list(set(list(frame.columns.values)) - set(valcols))
output = pd.melt(frame, id_vars=idcols, value_vars=valcols)
this.cache['data'] = output
def split_unpivot(this, col_name, delim, var = 'variable', inplace=True):
frame = this.cache['data']
if not inplace:
this.makeCopy()
col_to_split = frame[col_name]
splitcol = col_to_split.str.split(delim, expand = True)
valcols = []
for part in splitcol:
valcolname = col_name + '_' + str(part)
frame[valcolname] = splitcol[part]
valcols.append(valcolname)
#now unpivot these columns
# drop the col that is about to be replaced
this.drop_col(col_name)
print('dropped col')
idcols = list(set(list(frame.columns.values)) - set(valcols))
#reassign
frame = this.cache['data']
# change the name of variable column if one exists with same name
if var in frame.columns:
var = this.id_generator(4)
output = pd.melt(frame, id_vars=idcols, value_vars=valcols, var_name=var, value_name=col_name).dropna(subset=[col_name])
print('Dropped')
# and finally replace
# need a way to drop the none
this.cache['data'] = output
#this.cache['data'] = output[output[col_name] != 'None']
this.drop_col(var)
return output
def rename_col(this, col_name, new_col_name, inplace=True):
frame = this.cache['data']
if not inplace:
this.makeCopy()
frame = frame.rename(index = str, columns={col_name : new_col_name})
this.cache['data'] = frame
return frame
def countif(this, col_name, str_to_count, new_col='assign', inplace=True):
frame = this.cache['data']
if not inplace:
this.makeCopy()
# see if I need to assign a new name
if new_col == 'assign':
count = 0
new_col = col_name + '_' + str_to_count + '_countif'
#while new_col in frame.columns:
# count = count + 1
# new_col = col_name + '_' + count
print (new_col)
frame[new_col] = frame[col_name].str.count(str_to_count)
this.cache['data'] = frame
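# Example (hypothetical column): countif('title', 'the') adds a 'title_the_countif' column
# holding the number of occurrences of 'the' in each row of 'title'.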
# val is the other columns to keep
def pivot(this, column_to_pivot, val='assign', inplace=True):
frame = this.cache['data']
if not inplace:
this.makeCopy()
if val == 'assign':
frame = frame.pivot(columns=column_to_pivot)
else:
frame = frame.pivot(columns=column_to_pivot, values=val)
this.cache['data'] = frame
# val is the other columns to keep
# index = columns to pivot
# Columns = Columns to show
# Values = actual values to pivot
#agg function = how to aggregate
# pvt = pd.pivot_table(mv, index=['Studio', 'Genre'], columns='Nominated', values='MovieBudget' ).reset_index()
# pvt.columns = pvt.columns.to_series().str.join('_')
# pvt.reset_index()
def is_numeric(this, col_name, inplace=True):
frame = this.cache['data']
return | is_numeric_dtype(frame[col_name]) | pandas.api.types.is_numeric_dtype |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from FileUtil import FileUtil
from Util import Util
from constants import Constants
from pathlib import Path
import scipy.stats as stats
import seaborn as sns
import stats.CliffDelta as cld
class AgeNorm:
def __init__(self):
self.all_age = []
# 0, 183, 365, 730, 1095, 1460, 1825
self.AGES = [0, 183, 365, 730, 1095, 1460]
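# Age thresholds in days: roughly 0.5, 1, 2, 3 and 4 years after a method's creation.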
def process(self):
for json_file in Path(Constants.PROCESSED_DATA).rglob('*'):
repo = json_file.name.replace(".json", "")
print("Processing {0}...".format(repo))
data = FileUtil.load_json(json_file)
bugdata = FileUtil.load_json(Constants.BASE_PATH + "bugData/" + repo + ".json")
for m in data:
method = data[m]
mtdBugData = bugdata[m]
self.all_age.append({
"age": method["changeDates"][-1],
"rev": len(method["changeDates"]) - 1,
"bug": mtdBugData["exactBug0Match"][1:].count(True),
"repo": method["repo"]
})
FileUtil.save_json(Constants.BASE_PATH + "age/all_age_bug_rev.json", self.all_age)
print("Saved all_age_bug_rev.json.....")
self.age_vs_bug_and_rev()
def age_vs_bug_and_rev(self):
if not self.all_age:
self.all_age = FileUtil.load_json(Constants.BASE_PATH + "age/all_age_bug_rev.json")
df = pd.DataFrame.from_dict(self.all_age)
result = []
result.append(
self.apply_stats(df["age"], df["rev"], "Age_vs_Revisions", "all", stats_to_apply="kendall", age_threshold=0))
result.append(
self.apply_stats(df["age"], df["bug"], "Age_vs_Bug", "all", stats_to_apply="kendall", age_threshold=0))
result_df = pd.DataFrame.from_dict(result)
print("Corr of age with revisions and bugs:")
print(result_df)
result_df.to_csv(Constants.BASE_PATH + "age/all_age_bug_rev.csv")
print("Done process age vs bug vs rev........")
def age_with_versioning(self, AGE_THRESHOLD, apply_change_filter=False):
result = {
"Age_threshold": AGE_THRESHOLD,
"data": []
}
if apply_change_filter:
outFile = Constants.BASE_PATH + "age/age_norm_onlyChangedM_" + str(AGE_THRESHOLD) + ".json"
else:
outFile = Constants.BASE_PATH + "age/age_norm_" + str(AGE_THRESHOLD) + ".json"
for json_file in Path(Constants.PROCESSED_DATA).rglob('*'):
repo = json_file.name.replace(".json", "")
data = FileUtil.load_json(json_file)
bugdata = FileUtil.load_json(Constants.BASE_PATH + "bugData/" + repo + ".json")
negCount = 0
for method in data:
method_details = data[method]
mtdBugData = bugdata[method]
if method_details["Age"] <= AGE_THRESHOLD and method_details["Age"] > 0:
shouldSkipXmethods = True
else:
shouldSkipXmethods = False
if apply_change_filter:
if len(method_details["changeDates"]) == 1:
continue
sloc_version = {}
track = 0
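# Group this method's revisions by the SLOC of the previous version: for every revision
# within AGE_THRESHOLD days of creation (all revisions when the threshold is 0),
# accumulate change, diff-size and bug counts under that SLOC key.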
for i in range(1, len(method_details["diffSizes"])):
track = 1
if method_details["changeDates"][i] > AGE_THRESHOLD:
if AGE_THRESHOLD != 0:
break
prevSLOC = method_details["sloc"][i - 1]
if prevSLOC not in sloc_version:
sloc_version[prevSLOC] = {
"allChanges": 1,
"essentialChanges": 1 if method_details["isEssentialChange"][i] else 0,
"bodychanges": 1 if Util.is_body_change(method_details["changeTypes"][i]) else 0,
"minorchanges": 1 if not method_details["isEssentialChange"][i] else 0,
"diffSizes": method_details["diffSizes"][i],
"newAdditions": method_details["newAdditions"][i],
"isGetterOrSetter": method_details["isGetter"][i] or method_details["isSetter"][i],
"editDistance": method_details["editDistance"][i],
"repo": method_details["repo"],
"shouldSkipXMethods": shouldSkipXmethods
}
if mtdBugData["exactBug0Match"][i]:
sloc_version[prevSLOC]["bugCount"] = 1
else:
sloc_version[prevSLOC]["bugCount"] = 0
else:
sloc_version[prevSLOC]["allChanges"] += 1
essentialChanges = 1 if method_details["isEssentialChange"][i] else 0
bodychanges = 1 if Util.is_body_change(method_details["changeTypes"][i]) else 0
minorchanges = 1 if not method_details["isEssentialChange"][i] else 0
sloc_version[prevSLOC]["essentialChanges"] += essentialChanges
sloc_version[prevSLOC]["bodychanges"] += bodychanges
sloc_version[prevSLOC]["minorchanges"] += minorchanges
sloc_version[prevSLOC]["diffSizes"] += method_details["diffSizes"][i]
sloc_version[prevSLOC]["newAdditions"] += method_details["newAdditions"][i]
sloc_version[prevSLOC]["editDistance"] += method_details["editDistance"][i]
if mtdBugData["exactBug0Match"][i]:
sloc_version[prevSLOC]["bugCount"] += 1
if track == 0:
sloc_version[method_details["sloc"][0]] = {
"allChanges": 0,
"essentialChanges": 0,
"bodychanges": 0,
"minorchanges": 0,
"diffSizes": 0,
"newAdditions": 0,
"bugCount": 0,
"editDistance": 0,
"shouldSkipXMethods": shouldSkipXmethods,
"isGetterOrSetter": method_details["isGetter"][0] or method_details["isSetter"][0],
"repo": method_details["repo"]
}
for sloc in sloc_version:
result["data"].append({
"sloc": int(sloc),
"allChanges": sloc_version[sloc]["allChanges"],
"essentialChanges": sloc_version[sloc]["essentialChanges"],
"bodychanges": sloc_version[sloc]["bodychanges"],
"minorchanges": sloc_version[sloc]["minorchanges"],
"diffSizes": sloc_version[sloc]["diffSizes"],
"newAdditions": sloc_version[sloc]["newAdditions"],
"editDistance": sloc_version[sloc]["editDistance"],
"change_by_sloc": round(sloc_version[sloc]["essentialChanges"] / int(sloc), 4),
"bugCount": sloc_version[sloc]["bugCount"],
"repo": sloc_version[sloc]["repo"],
"shouldSkipXMethods": sloc_version[sloc]["shouldSkipXMethods"],
"isGetterOrSetter": sloc_version[sloc]["isGetterOrSetter"]
})
FileUtil.save_json(outFile, result)
print("Done age norm for age threshold {0}".format(AGE_THRESHOLD))
# interval = [183, 365, 730, 1095, 1460, 5]
def interval_age_versioning(self, age_interval=[0, 183], exclude_x_methods=False):
interval_range = str(age_interval[0]) + "-" + str(age_interval[1])
result = {
"interval": interval_range,
"data": []
}
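# Note: an upper bound of 5 in age_interval acts as a sentinel for "no upper bound",
# i.e. every change at or after age_interval[0] days is included.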
if exclude_x_methods:
outfile = Constants.BASE_PATH + "interval/interval_age_excluded_" + interval_range + ".json"
else:
outfile = Constants.BASE_PATH + "interval/interval_age_" + interval_range + ".json"
for json_file in Path(Constants.PROCESSED_DATA).rglob('*'):
repo = json_file.name.replace(".json", "")
data = FileUtil.load_json(json_file)
bugdata = FileUtil.load_json(Constants.BASE_PATH + "bugData/" + repo + ".json")
for method in data:
method_details = data[method]
mtdBugData = bugdata[method]
# if method_details["Age"] <= AGE_THRESHOLD:
# continue
if exclude_x_methods:
if age_interval[1] != 5:
if method_details["Age"] < age_interval[1] and method_details["Age"] > 0:
continue
elif age_interval[1] == 5:
if method_details["Age"] < age_interval[0] and method_details["Age"] > 0:
continue
sloc_version = {}
track = 0
for i in range(1, len(method_details["diffSizes"])):
track = 1
if age_interval[1] != 5:
if age_interval[1] >= method_details["changeDates"][i] >= age_interval[0]:
interval = interval_range
else:
continue
if age_interval[1] == 5 and not (method_details["changeDates"][i] >= age_interval[0]):
continue
# if method_details["isEssentialChange"][i]:
prevSLOC = str(method_details["sloc"][i - 1])
if prevSLOC not in sloc_version:
sloc_version[prevSLOC] = {
"allChanges": 1,
"essentialChanges": 1 if method_details["isEssentialChange"][i] else 0,
"bodychanges": 1 if Util.is_body_change(method_details["changeTypes"][i]) else 0,
"minorchanges": 1 if not method_details["isEssentialChange"][i] else 0,
"diffSizes": method_details["diffSizes"][i],
"newAdditions": method_details["newAdditions"][i],
"isGetterOrSetter": method_details["isGetter"][i] or method_details["isSetter"][i],
"editDistance": method_details["editDistance"][i],
"repo": method_details["repo"],
"interval": interval_range
}
if mtdBugData["exactBug0Match"][i]:
sloc_version[prevSLOC]["bugCount"] = 1
else:
sloc_version[prevSLOC]["bugCount"] = 0
else:
sloc_version[prevSLOC]["allChanges"] += 1
essentialChanges = 1 if method_details["isEssentialChange"][i] else 0
bodychanges = 1 if Util.is_body_change(method_details["changeTypes"][i]) else 0
minorchanges = 1 if not method_details["isEssentialChange"][i] else 0
sloc_version[prevSLOC]["essentialChanges"] += essentialChanges
sloc_version[prevSLOC]["bodychanges"] += bodychanges
sloc_version[prevSLOC]["minorchanges"] += minorchanges
sloc_version[prevSLOC]["diffSizes"] += method_details["diffSizes"][i]
sloc_version[prevSLOC]["newAdditions"] += method_details["newAdditions"][i]
sloc_version[prevSLOC]["editDistance"] += method_details["editDistance"][i]
if mtdBugData["exactBug0Match"][i]:
sloc_version[prevSLOC]["bugCount"] += 1
if track == 0:
if age_interval[0] == 0:
sloc_version[method_details["sloc"][0]] = {
"allChanges": 0,
"essentialChanges": 0,
"bodychanges": 0,
"minorchanges": 0,
"diffSizes": 0,
"newAdditions": 0,
"bugCount": 0,
"editDistance": 0,
"isGetterOrSetter": method_details["isGetter"][0] or method_details["isSetter"][0],
"interval": interval_range,
"repo": method_details["repo"]
}
for sc in sloc_version:
sloc = int(sc)
# interval = int(sloc_interval[1])
result["data"].append({
"sloc": int(sloc),
"allChanges": sloc_version[sc]["allChanges"],
"essentialChanges": sloc_version[sc]["essentialChanges"],
"bodychanges": sloc_version[sc]["bodychanges"],
"minorchanges": sloc_version[sc]["minorchanges"],
"diffSizes": sloc_version[sc]["diffSizes"],
"newAdditions": sloc_version[sc]["newAdditions"],
"editDistance": sloc_version[sc]["editDistance"],
"bugCount": sloc_version[sc]["bugCount"],
"repo": sloc_version[sc]["repo"],
"interval": interval_range,
"isGetterOrSetter": sloc_version[sc]["isGetterOrSetter"]
})
FileUtil.save_json(outfile, result)
print("Done preparing data in interval {0} and {1}".format(age_interval[0], age_interval[1]))
def calc_interval_corr(self, exclude_x_methods=False):
result = []
for json_file in Path(Constants.BASE_PATH + "interval/").rglob('*'):
data = FileUtil.load_json(json_file)
interval_range = data["interval"]
print("Processing interval {0}".format(interval_range))
df = pd.DataFrame.from_dict(data["data"])
for repo in Constants.ALL_MINED_REPOS:
repo_data = df[df["repo"] == repo]
if not repo_data.empty:
result.append(
self.apply_stats(
repo_data["sloc"],
repo_data["allChanges"],
"sloc-allChanges-" + interval_range,
repo,
stats_to_apply="kendall",
age_threshold=interval_range
)
)
result.append(
self.apply_stats(
repo_data["sloc"],
repo_data["bugCount"],
"sloc-bugCount-" + interval_range,
repo,
stats_to_apply="kendall",
age_threshold=interval_range
)
)
pd.DataFrame.from_dict(result).to_csv(Constants.BASE_PATH + "age/all_corr_interval_age_data.csv")
print("Done calculating corr....")
# def calc_cliff_delta_for_age_interval(self, grp, intervals=[183, 365, 730, 1095, 1825, 6]):
# df = pd.read_csv(Constants.BASE_PATH + "age/all_corr_interval_age_data.csv")
# # intervals = [365, 730, 1095, 1825, 6]
# yearHalf = df[(df["age_threshold"] == intervals[0]) & (df["group"] == grp)]
# year1 = df[(df["age_threshold"] == intervals[1]) & (df["group"] == grp)]
# year2 = df[(df["age_threshold"] == intervals[2]) & (df["group"] == grp)]
# year3 = df[(df["age_threshold"] == intervals[3]) & (df["group"] == grp)]
# year5 = df[(df["age_threshold"] == intervals[4]) & (df["group"] == grp)]
# after5 = df[(df["age_threshold"] == intervals[5]) & (df["group"] == grp)]
# result = []
# result.append(self.apply_cliff_delta(yearHalf["corr"], year1["corr"], "0.5yr", "1yr"))
# result.append(self.apply_cliff_delta(yearHalf["corr"], year2["corr"], "0.5yr", "2yr"))
# result.append(self.apply_cliff_delta(yearHalf["corr"], year3["corr"], "0.5yr", "3yr"))
# # result.append(self.apply_cliff_delta(yearHalf["corr"], year5["corr"], "0.5yr", "5yr"))
# result.append(self.apply_cliff_delta(yearHalf["corr"], after5["corr"], "0.5yr", "after5yr"))
# result.append(self.apply_cliff_delta(year1["corr"], year2["corr"], "1yr", "2yr"))
# result.append(self.apply_cliff_delta(year1["corr"], year3["corr"], "1yr", "3yr"))
# # result.append(self.apply_cliff_delta(year1["corr"], year5["corr"], "1yr", "5yr"))
# result.append(self.apply_cliff_delta(year1["corr"], after5["corr"], "1yr", "after5yr"))
# result.append(self.apply_cliff_delta(year2["corr"], year3["corr"], "2yr", "3yr"))
# # result.append(self.apply_cliff_delta(year2["corr"], year5["corr"], "2yr", "5yr"))
# result.append(self.apply_cliff_delta(year2["corr"], after5["corr"], "2yr", "after5yr"))
# # result.append(self.apply_cliff_delta(year3["corr"], year5["corr"], "3yr", "5yr"))
# result.append(self.apply_cliff_delta(year3["corr"], after5["corr"], "3yr", "after5yr"))
# # result.append(self.apply_cliff_delta(year5["corr"], after5["corr"], "5yr", "after5yr"))
# pd.DataFrame.from_dict(result).to_csv(Constants.BASE_PATH + "age/interval_wise_corr_" + grp + ".csv")
# print("Done.......")
# print("Plotting graph....")
# sns.ecdfplot(yearHalf, x=yearHalf["corr"], linewidth=1.0, marker="o", markersize=5)
# sns.ecdfplot(year1, x=year1["corr"], linewidth=1.0, marker="o", markersize=5)
# sns.ecdfplot(year2, x=year2["corr"], linewidth=1.0, marker="*", markersize=5)
# sns.ecdfplot(year3, x=year3["corr"], linewidth=1.0, marker="v", markersize=5)
# sns.ecdfplot(year5, x=year5["corr"], linewidth=1.0, marker="d", markersize=5)
# sns.ecdfplot(after5, x=after5["corr"], linewidth=1.0, marker="3", markersize=5)
# plt.legend(["0-0.5yr" , "0.5-1yr", "1-2yr", "2-3yr", "3-5yr", "after 5 yr"])
# plt.savefig(
# Constants.BASE_PATH + "age/" + grp + "_interval_change_cdf.png",
# bbox_inches='tight')
# plt.show()
def apply_stats(self, x1, x2, label, repo, stats_to_apply="kendall", age_threshold=0):
if stats_to_apply == "kendall":
corr, p_value = stats.kendalltau(x1, x2)
elif stats_to_apply == 'spearman':
corr, p_value = stats.spearmanr(x1, x2)
else:
corr, p_value = stats.pearsonr(x1, x2)
return {
"corr": round(corr, 2),
"p_value": p_value,
"significant": 'yes' if p_value < 0.05 else "no",
"group": label,
"repo": repo,
"type": stats_to_apply,
"age_threshold": age_threshold
}
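# Sketch: apply_stats(df['sloc'], df['bugCount'], 'sloc_vs_bugCount', 'some_repo') returns
# e.g. {'corr': 0.12, 'p_value': ..., 'significant': 'yes', ...} using Kendall's tau by
# default; 'some_repo' is a placeholder repository name.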
def calculate_corr(self, AGES=[0, 183, 365, 730, 1095, 1460], should_exclued_less_than_x_years=False, apply_change_filter=False):
if should_exclued_less_than_x_years:
filename_to_save = "age/all_age_norm_data_without_x_years_methods"
else:
filename_to_save = "age/all_age_norm_data"
if apply_change_filter:
filename_to_save = filename_to_save + "_onlyChangedM.csv"
elif should_exclued_less_than_x_years:
filename_to_save = filename_to_save + ".csv"
else:
filename_to_save = filename_to_save + ".csv"
print("Start processing....")
result = []
for age in AGES:
print("Processing age {0}".format(age))
if apply_change_filter:
inFile = Constants.BASE_PATH + "age/age_norm_onlyChangedM_" + str(age) + ".json"
else:
inFile = Constants.BASE_PATH + "age/age_norm_" + str(age) + ".json"
data = FileUtil.load_json(inFile)
if data["Age_threshold"] == age:
df = pd.DataFrame.from_dict(data['data'])
if should_exclued_less_than_x_years:
df = df[df["shouldSkipXMethods"] == False]
# print("Number of methods {0} for age {1}...".format(df.shape[0], age))
for repo in Constants.ALL_MINED_REPOS:
repo_data = df[df["repo"] == repo]
print("Processing for repo {0}".format(repo))
# print("Number of methods {0} for age {1} in repo {2}...".format(repo_data.shape[0], age, repo))
try:
if not repo_data.empty:
result.append(
self.apply_stats(repo_data["sloc"], repo_data["allChanges"],
"sloc_vs_all_changes", repo,
stats_to_apply="kendall", age_threshold=age))
result.append(
self.apply_stats(repo_data["sloc"], repo_data["essentialChanges"],
"sloc_vs_essential_changes", repo,
stats_to_apply="kendall", age_threshold=age))
result.append(
self.apply_stats(repo_data["sloc"], repo_data["bodychanges"],
"sloc_vs_bodychanges", repo,
stats_to_apply="kendall", age_threshold=age))
result.append(
self.apply_stats(repo_data["sloc"], repo_data["minorchanges"],
"sloc_vs_minorchanges", repo,
stats_to_apply="kendall", age_threshold=age))
result.append(
self.apply_stats(repo_data["sloc"], repo_data["bugCount"], "sloc_vs_bugCount", repo,
stats_to_apply="kendall", age_threshold=age))
except Exception as e:
print(e)
else:
print("File does not exist. Create files first")
break
pd.DataFrame.from_dict(result).to_csv(Constants.BASE_PATH + filename_to_save)
print("Done processing age norm csv.....")
def plot_corr_cdf(self, stats_to_use, filter_less_than_x_age=False):
# 0, 183, 365, 730, 1825
if filter_less_than_x_age:
data = pd.read_csv(Constants.BASE_PATH + "age/all_age_norm_data_without_x_years_methods.csv")
else:
data = pd.read_csv(Constants.BASE_PATH + "age/all_age_norm_data.csv")
not_sig_data = data[(data["significant"] == "no") & (data["type"] == stats_to_use)]
not_sig_data.to_csv(Constants.BASE_PATH + "age/not_sig_age_data.csv")
data = data[data["type"] == stats_to_use]
all_change_filename = 'sloc_vs_all_changes_age'
bug_filename = "sloc_vs_bug_age"
if filter_less_than_x_age:
all_change_filename = "rq3_" + all_change_filename
bug_filename = "rq3_" + bug_filename
self.render_graph(data, 'sloc_vs_all_changes', all_change_filename + ".pdf", self.AGES)
self.render_graph(data, 'sloc_vs_bugCount', bug_filename + ".pdf", self.AGES)
def render_graph(self, data, grp, filename, ages):
age0 = data[(data["age_threshold"] == ages[0]) & (data["group"] == grp)]
self.print_repo_not_in_list(age0, ages[0], grp)
age183 = data[(data["age_threshold"] == ages[1]) & (data["group"] == grp)]
self.print_repo_not_in_list(age183, ages[1], grp)
age365 = data[(data["age_threshold"] == ages[2]) & (data["group"] == grp)]
self.print_repo_not_in_list(age365, ages[2], grp)
age730 = data[(data["age_threshold"] == ages[3]) & (data["group"] == grp)]
self.print_repo_not_in_list(age730, ages[3], grp)
age1095 = data[(data["age_threshold"] == ages[4]) & (data["group"] == grp)]
self.print_repo_not_in_list(age1095, ages[4], grp)
age1460 = data[(data["age_threshold"] == ages[5]) & (data["group"] == grp)]
self.print_repo_not_in_list(age1460, ages[5], grp)
result = []
result.append(self.apply_cliff_delta(age0["corr"], age183["corr"], "age0", "age183"))
result.append(self.apply_cliff_delta(age0["corr"], age365["corr"], "age0", "age365"))
result.append(self.apply_cliff_delta(age0["corr"], age730["corr"], "age0", "age730"))
result.append(self.apply_cliff_delta(age0["corr"], age1095["corr"], "age0", "age1095"))
result.append(self.apply_cliff_delta(age0["corr"], age1460["corr"], "age0", "age1460"))
result.append(self.apply_cliff_delta(age183["corr"], age365["corr"], "age183", "age365"))
result.append(self.apply_cliff_delta(age183["corr"], age730["corr"], "age183", "age730"))
result.append(self.apply_cliff_delta(age183["corr"], age1095["corr"], "age183", "age1095"))
result.append(self.apply_cliff_delta(age183["corr"], age1460["corr"], "age183", "age1460"))
result.append(self.apply_cliff_delta(age365["corr"], age730["corr"], "age365", "age730"))
result.append(self.apply_cliff_delta(age365["corr"], age1095["corr"], "age365", "age1095"))
result.append(self.apply_cliff_delta(age365["corr"], age1460["corr"], "age365", "age1460"))
result.append(self.apply_cliff_delta(age730["corr"], age1095["corr"], "age730", "age1095"))
result.append(self.apply_cliff_delta(age730["corr"], age1460["corr"], "age730", "age1460"))
result.append(self.apply_cliff_delta(age1095["corr"], age1460["corr"], "age1095", "age1460"))
tmp_df = | pd.DataFrame.from_dict(result) | pandas.DataFrame.from_dict |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
            216: pd.Timestamp("2012-12-04 00:00:00"),
import re
import yaml
import calendar
import pandas as pd
from pathlib import Path
from datetime import datetime
from influxdb import InfluxDBClient
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
# VADER
analyzer = SentimentIntensityAnalyzer()
# InfluxDB connections settings
host = 'XXX.XXX.XXX.XXX'
port = XXX
user = 'XXX'
password = '<PASSWORD>'
dbname = 'cryptotweets'
metric = 'tweets'
client = InfluxDBClient(host, port, user, password, dbname)
def create_filters(config):
filters = {}
topics = config['topics']
for key, keywords in topics.items():
regex = []
for keyword in keywords:
regex.append(re.escape(keyword) + r'\b')
filters[key] = regex
return filters
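# Illustrative sketch (hypothetical config): create_filters({'topics': {'btc': ['bitcoin', 'BTC']}})
# would return {'btc': ['bitcoin\\b', 'BTC\\b']}, i.e. each keyword escaped and
# anchored to a word boundary for use with re.search in process_tweets below.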
def process_tweets(raw_tweets, filters):
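    # Reduce each tweet to its epoch timestamp plus the four VADER scores
    # (neg/neu/pos and the normalized compound) and append it to every topic
    # whose keyword regex matches the tweet text.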
tweets = {}
for key, _ in filters.items():
tweets[key] = []
for response in raw_tweets:
for tweet in response:
if not tweet:
break
time = calendar.timegm(datetime.strptime(
tweet['time'][:19], "%Y-%m-%dT%H:%M:%S").timetuple())
polarity = analyzer.polarity_scores(tweet['text'])
new_t = {
'time': time,
'neg': polarity['neg'],
'neu': polarity['neu'],
'pos': polarity['pos'],
'norm': polarity['compound']
}
for key, keywords in filters.items():
regex = '|'.join(map(str, keywords))
if re.search(regex, tweet['text']):
tweets[key].append(new_t)
return tweets
def fetch_tweets(start_time, end_time):
with open('config.yml', 'r') as config_file:
        config = yaml.safe_load(config_file)
    start_date = pd.to_datetime(start_time, unit='s')
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @time : 2021/2/19 9:38
# @author : <NAME>
import argparse
import asyncio
import os
import time
from functools import partial
from multiprocessing import Pool, Process
import numpy as np
import pandas as pd
from my_utils import Smiles
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')
def sdf2csv(content):
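    # Parse each SDF record: the ligand name is the line preceding the ' 3D' tag
    # and the docking score follows the '> <r_i_docking_score>' field; repeated
    # names get a numeric suffix so every pose keeps a unique identifier.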
line = content[0]
last_name = line.split(' 3D')[0].split('\n')[-2]
score = line.split('> <r_i_docking_score>')[1].split('\n')[1]
pd.DataFrame([last_name, score, job_type]).T.to_csv(dst_csv, index=False, header=False, mode='a')
n = 1
for line in content[1:]:
lig_name = line.split(' 3D')[0].split('\n')[-2]
if lig_name == last_name:
lig_name = f'{lig_name}_{n}'
n += 1
else:
last_name = lig_name
n = 1
score = line.split('> <r_i_docking_score>')[1].split('\n')[1]
pd.DataFrame([lig_name, score, job_type]).T.to_csv(dst_csv, index=False, header=False, mode='a')
def sp2csv(src_score_file, dst_score_file):
src_df = pd.read_csv(src_score_file)
# get uniq_active_names
uniq_names = list(set(i for i in src_df.loc[:, 'NAME'].values))
# get score
for uniq_name in uniq_names:
tmp_df = src_df[src_df.NAME == uniq_name]
tmp_df.sort_values(by='r_i_docking_score', inplace=True)
tmp_df = tmp_df.loc[:, ['NAME', 'r_i_docking_score']]
tmp_df['NAME'] = [f'{uniq_name}_{i}' for i in range(len(tmp_df))]
tmp_df_ac = pd.DataFrame(tmp_df.iloc[0, :]).T
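        # Keep the best-scoring pose only if it docks reasonably well
        # (docking score <= -6, presumably kcal/mol) and pair it with up to
        # 50 of the weakest poses (score >= -4) to serve as decoys.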
if tmp_df_ac.iloc[0, 1] <= -6:
tmp_df_decoys = tmp_df.iloc[1:, :]
tmp_df_decoys = tmp_df_decoys[tmp_df_decoys.r_i_docking_score >= -4].iloc[-50:, :]
tmp_df = tmp_df_ac.append(tmp_df_decoys, sort=False)
tmp_df.to_csv(dst_score_file, index=True, header=False, mode='a')
def split_(lig_name, content, dst_path, format_):
    # lig_name = content.split('\n')[0].strip()  # get the small-molecule (ligand) name
    lig_file = '{}/{}.{}'.format(dst_path, lig_name, format_)  # build the output path for this molecule
if not os.path.exists(lig_file):
        # write the output file
with open(lig_file, 'w') as f:
f.write(f'{file_label[format_]}\n' + content)
if __name__ == '__main__':
# init
argparser = argparse.ArgumentParser()
argparser.add_argument('--target', type=str, default='mpro')
argparser.add_argument('--src_path', type=str, default='/home/xujun/Project_5/total_worflow/docking')
argparser.add_argument('--lig_score_csv', type=str, default='score.csv')
args = argparser.parse_args()
# instance
target = args.target
src_path = f'{args.src_path}/{target}/docked'
dst_path = f'{args.src_path}/{target}/ad'
format_ = 'sdf'
src_ligand_file = f'{src_path}/SP_raw.{format_}'
src_score_file = f'{src_path}/SP_raw.csv'
dst_csv = f'{dst_path}/{args.lig_score_csv}'
dst_ligand = f'{dst_path}/decoy_conformations.{format_}'
# smile
smiler = Smiles(smile_lis=[''], names=[])
file_label = {
'sdf': '$$$$',
'mol2': '@<TRIPOS>MOLECULE'
}
# split
if not os.path.exists(dst_path):
os.makedirs(dst_path)
    # read the whole ligand file into con
with open(src_ligand_file, 'r') as f:
con = f.read()
    # splitting on '@<TRIPOS>MOLECULE' leaves an empty first element; splitting on '$$$$\n' leaves an empty last element
con = con.split(f'{file_label[format_]}\n')[:-1]
    # check that the number of records matches
    df = pd.read_csv(src_score_file)
#%%
import pandas as pd
import numpy as np
## initial trial with 500 normal and abnormal transcripts
#allnormal=np.load('allnormal30875_first500.npy', allow_pickle=True)
#allabnormal=np.load('allabnormal30875_first500.npy', allow_pickle=True)
# Load full SAD Human Body Map ERR030875 data
allnormal=np.load("/home/priyamvada/data/allnormal30875.npy",allow_pickle=True)
allabnormal=np.load("/home/priyamvada/data/allabnormal30875.npy",allow_pickle=True)
column_names=('transcript_name', 'expected_coverage', 'observed_coverage', 'label')
df_normal = pd.DataFrame(allnormal, columns=column_names)
import unittest
from enda.timeseries import TimeSeries
import pandas as pd
import pytz
class TestTimeSeries(unittest.TestCase):
def test_collapse_dt_series_into_periods(self):
# periods is a list of (start, end) pairs.
periods = [
(pd.to_datetime('2018-01-01 00:15:00+01:00'), pd.to_datetime('2018-01-01 00:45:00+01:00')),
(pd.to_datetime('2018-01-01 10:15:00+01:00'), pd.to_datetime('2018-01-01 15:45:00+01:00')),
(pd.to_datetime('2018-01-01 20:15:00+01:00'), pd.to_datetime('2018-01-01 21:45:00+01:00')),
]
# expand periods to build a time-series with gaps
dti = pd.DatetimeIndex([])
for s, e in periods:
dti = dti.append(pd.date_range(s, e, freq="30min"))
self.assertEqual(2+12+4, dti.shape[0])
# now find periods in the time-series
# should work with 2 types of freq arguments
for freq in ["30min", pd.to_timedelta("30min")]:
computed_periods = TimeSeries.collapse_dt_series_into_periods(dti, freq)
self.assertEqual(len(computed_periods), len(periods))
for i in range(len(periods)):
self.assertEqual(computed_periods[i][0], periods[i][0])
self.assertEqual(computed_periods[i][1], periods[i][1])
def test_collapse_dt_series_into_periods_2(self):
dti = pd.DatetimeIndex([
pd.to_datetime('2018-01-01 00:15:00+01:00'),
pd.to_datetime('2018-01-01 00:45:00+01:00'),
pd.to_datetime('2018-01-01 00:30:00+01:00'),
pd.to_datetime('2018-01-01 01:00:00+01:00')
])
with self.assertRaises(ValueError):
# should raise an error because 15min gaps are not multiples of freq=30min
TimeSeries.collapse_dt_series_into_periods(dti, freq="30min")
def test_collapse_dt_series_into_periods_3(self):
dti = pd.DatetimeIndex([
pd.to_datetime('2018-01-01 00:00:00+01:00'),
pd.to_datetime('2018-01-01 00:15:00+01:00'),
pd.to_datetime('2018-01-01 00:30:00+01:00'),
pd.to_datetime('2018-01-01 00:45:00+01:00')
])
with self.assertRaises(ValueError):
# should raise an error because 15min gaps are not multiples of freq=30min
TimeSeries.collapse_dt_series_into_periods(dti, "30min")
def test_find_missing_and_extra_periods_1(self):
dti = pd.DatetimeIndex([
pd.to_datetime('2018-01-01 00:00:00+01:00'),
pd.to_datetime('2018-01-01 00:15:00+01:00'),
            pd.to_datetime('2018-01-01 00:30:00+01:00')
# %%
import pandas as pd
import numpy as np
import paddle.fluid as fluid
def create_df(output):
list = []
for em in output:
list.append(em[0])
df = pd.DataFrame()
df['similarity'] = list
norm = pd.read_csv('../task_data/xw/norm_q.txt', sep='\t')
df['norm_query'] = norm['text_a']
df.sort_values('similarity', inplace=True, ascending=False)
# print(df.head())
# print(norm.head())
return df
def cos_sim(np_x, np_y):
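    # Build a static Paddle graph that computes row-wise cosine similarity between
    # the 666 candidate embeddings (np_x, shape [666, 768]) and a single query
    # embedding (np_y, shape [1, 768]), run it once on GPU and return the result.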
x = fluid.layers.data(name='x', shape=[666, 768], dtype='float32', append_batch_size=False)
y = fluid.layers.data(name='y', shape=[1, 768], dtype='float32', append_batch_size=False)
out = fluid.layers.cos_sim(x, y)
place = fluid.CUDAPlace(0)
#place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
output = exe.run(feed={"x": np_x, "y": np_y}, fetch_list=[out])
return output[0]
# print(output)
# %%
if __name__ == '__main__':
emb1 = np.load('norm/cls_emb.npy')
emb2 = np.load('sim/cls_emb.npy')
    df_emb2 = pd.read_csv('../task_data/xw/sim_q.txt', sep='\t')
# -*- coding: utf-8 -*-
"""
Collection of utility objects and functions for the :mod:`fluxdataqaqc`
module.
"""
import numpy as np
import pandas as pd
from pathlib import Path
class Convert(object):
"""
Tools for unit conversions for ``flux-data-qaqc`` module.
"""
# this is a work in progress, add more as needed/conversions are handled
# input unit strings are not case sensitive, they will be forced to lower
allowable_units = {
'LE': ['w/m2','mj/m2'],
'H': ['w/m2','mj/m2'],
'Rn': ['w/m2','mj/m2'],
'G': ['w/m2','mj/m2'],
'lw_in': ['w/m2','mj/m2'],
'lw_out': ['w/m2','mj/m2'],
'sw_in': ['w/m2'],
'sw_out': ['w/m2','mj/m2'],
'ppt': ['mm', 'in', 'm'],
'vp': ['kpa', 'hpa', 'pa'],
'vpd': ['kpa', 'hpa', 'pa'],
't_avg': ['c', 'f', 'k'],
't_min': ['c', 'f', 'k'],
't_max': ['c', 'f', 'k'],
'ws': ['m/s', 'mph']
}
# for printing and plotting purposes
pretty_unit_names = {
'pa': 'Pa',
'hpa': 'hPa',
'kpa': 'kPa',
'c': 'C',
'f': 'F',
'k': 'K'
}
# some variables need to be in specified units for internal calculations
# they will be attempted to be converted upon initialization of a QaQc obj
# allowable initial units can be found in QaQc.allowable_units
required_units = {
'LE': 'w/m2',
'H': 'w/m2',
'Rn': 'w/m2',
'G': 'w/m2',
'lw_in': 'w/m2',
'lw_out': 'w/m2',
'sw_in': 'w/m2',
'sw_out': 'w/m2',
'ppt': 'mm',
'vp': 'kpa',
'vpd': 'kpa',
't_avg': 'c',
't_min': 'c',
't_max': 'c',
'ws': 'm/s'
}
def __init__(self):
self._conversion_map = {
'k_to_c': self._k_to_c,
'hpa_to_kpa': self._hpa_to_kpa,
'pa_to_kpa': self._pa_to_kpa,
'in_to_mm': self._in_to_mm,
'm_to_mm': self._m_to_mm,
'f_to_c': self._f_to_c,
'mj/m2_to_w/m2': self._mj_per_m2_to_watts_per_m2,
'mph_to_m/s': self._mph_to_m_per_s # miles/hr to meters/sec
}
@classmethod
def convert(cls, var_name, initial_unit, desired_unit, df):
"""
        Given a valid initial and desired unit for a variable within a
        :obj:`pandas.DataFrame`, perform the conversion and return the
        updated :obj:`pandas.DataFrame`.
        For the variables that require specific units within
        ``flux-data-qaqc``, see :attr:`Convert.allowable_units` (the accepted
        input unit names for each variable) and :attr:`Convert.required_units`
        (the units those variables must be in before running QaQc
        calculations).
Arguments:
var_name (str): name of variable to convert in ``df``.
initial_unit (str): name of initial unit of variable, must be valid
from :attr:`Convert.allowable_units`.
desired_unit (str): name of units to convert to, also must be valid.
df (:obj:`pandas.DataFrame`): :obj:`pandas.DataFrame` containing
variable to be converted, i.e. with ``var_name`` in columns.
Returns:
df (:obj:`pandas.DataFrame`): updated dataframe with specified variable's units converted
Note:
            Many potential units are not handled by automatic conversion; if
            so, you may need to update your variable units manually, e.g.
            within a :attr:`.Data.df` before creating a :obj:`.QaQc` instance.
            Unit conversions are required for variables that can potentially
            be used in calculations within :obj:`.Data` or :obj:`.QaQc`.
"""
conv = cls()
convert_key = '{}_to_{}'.format(initial_unit, desired_unit)
convert_func = conv._conversion_map[convert_key]
print(
'Converting {} from {} to {}'.format(
var_name, initial_unit, desired_unit
)
)
df = convert_func(df, var_name)
return df
def _in_to_mm(self, df, var_name):
df[var_name] *= 25.4
return df
def _m_to_mm(self, df, var_name):
df[var_name] *= 1000
return df
def _f_to_c(self, df, var_name):
        df[var_name] = (df[var_name] - 32) * (5/9)
return df
def _k_to_c(self, df, var_name):
df[var_name] -= 273.15
return df
def _hpa_to_kpa(self, df, var_name):
df[var_name] /= 10
return df
def _pa_to_kpa(self, df, var_name):
df[var_name] /= 1000
return df
def _mph_to_m_per_s(self, df, var_name):
df[var_name] *= 0.44704
return df
def _mj_per_m2_to_watts_per_m2(self, df, var_name):
# assumes average mj per day is correct- only valid daily
# because shortwate rad may be used in data (before daily) it is
# not covered for automatic conversion because time period is unknown
df[var_name] *= 11.574074074074074
return df
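# Example usage (illustrative sketch with made-up data): convert daily air
# temperature from Fahrenheit to Celsius in place.
#   df = pd.DataFrame({'t_avg': [32.0, 50.0, 68.0]})
#   df = Convert.convert('t_avg', 'f', 'c', df)
#   # df['t_avg'] is now [0.0, 10.0, 20.0]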
def monthly_resample(df, cols, agg_str, thresh=0.75):
"""
Resample dataframe to monthly frequency while excluding
months missing more than a specified percentage of days of the month.
Arguments:
df (:obj:`pandas.DataFrame`): datetime indexed DataFrame instance
cols (list): list of columns in `df` to resample to monthy frequency
agg_str (str): resample function as string, e.g. 'mean' or 'sum'
Keyword Arguments:
thresh (float): threshold (decimal fraction) of how many days in a
month must exist for it to be temporally resampled, otherwise
the monthly value for the month will be null.
Returns:
ret (:obj:`pandas.DataFrame`): datetime indexed DataFrame that has been resampled to monthly time frequency.
Note:
        If taking monthly totals (`agg_str` = 'sum'), missing days will be filled
        with the month's daily mean before summation.
"""
if agg_str == 'sum':
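        # For monthly sums also track the daily mean so missing days can be
        # gap-filled with the month's mean before summation (see docstring note).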
mdf = df.loc[:,cols].apply(pd.to_numeric).resample('M').agg(
[agg_str, 'count', 'mean']
)
else:
mdf = df.loc[:,cols].apply(pd.to_numeric).resample('M').agg(
[agg_str, 'count']
)
    ret = pd.DataFrame()
from django.http import JsonResponse
import requests
import asyncio
import aiohttp
import numpy as np
import pandas as pd
from pandas import json_normalize
import json
from functools import reduce
import unidecode
from random import randint
from time import sleep
import traceback
import sys
import random
import logging
def get_spotify_music_profile(request):
spotifyAPI = SpotifyAPI(request)
try:
music_profile = spotifyAPI.get_music_profile()
return music_profile
except Exception as e:
# traceback.format_exc()
print('GLOBAL EXCEPTION - BAD. RETURNING ERROR TO FRONT END')
logging.exception("music profile refresh exception")
error_report = {
'error': {
'message': str(e),
'status': 500,
}
}
return error_report
class SpotifyAPI:
REQUEST_EXCEPTION_MSG = "Spotify API Request Exception while fetching "
SAVE_PROFILE_AS_CSV = False
USER_PLAYLISTS_ONLY = True # don't change unless you want playlists a user follows to also be included
def __init__(self, access_token):
self.header = {'Authorization' : "Bearer "+access_token}
self.user_id = self.fetch_user_id()
self.artist_columns = []
self.track_columns = []
self.artists_dataframes = []
self.tracks_dataframes = []
def get_music_profile(self):
asyncio.run(self.collect_artists_and_tracks_dataframes())
print("converting dataframes to JSON...")
print(f'returning { self.artists_df.shape[0] } artists and { self.tracks_df.shape[0] } tracks')
if self.SAVE_PROFILE_AS_CSV:
self.artists_df.to_csv('artists_df.csv')
self.tracks_df.to_csv('tracks_df.csv')
artists_json = self.get_artists_json(self.artists_df)
tracks_json = self.get_tracks_json(self.tracks_df)
music_profile = {
"artists" : artists_json,
"tracks" : tracks_json,
}
return music_profile
def get_artists_json(self, artists_df):
return artists_df.to_json(orient='records')
def get_tracks_json(self, tracks_df):
return tracks_df.to_json(orient='records')
async def collect_artists_and_tracks_dataframes(self):
# fetch artists and tracks together, due to how the Spotify API returns both
print("collect_artists_and_tracks_dataframes()...")
tasks = [self.fetch_top_artists("long_term"), self.fetch_top_artists("medium_term"), self.fetch_top_artists("short_term")
, self.fetch_top_tracks("long_term"), self.fetch_top_tracks("medium_term"), self.fetch_top_tracks("short_term")
, self.fetch_followed_artists(), self.fetch_saved_tracks(), self.get_all_playlists()]
await asyncio.gather(*tasks)
print("initial tasks (fetches) have finishing gathering..")
print("initiating get_artists_master_df(), where full artist objects will be fetched..")
self.artists_df = await self.get_artists_master_df()
print("finished fetching full objects.")
self.tracks_df = self.get_tracks_master_df()
async def get_artists_master_df(self):
if self.artists_dataframes == []:
return pd.DataFrame()
artists_df = None
if len(self.artists_dataframes) > 1:
artists_df = reduce(lambda left, right: pd.merge(left, right, how="outer"), self.artists_dataframes)
else:
artists_df = self.artists_dataframes[0]
artists_df = artists_df.drop_duplicates()
if 'id' not in artists_df:
return pd.DataFrame()
# add all columns needed if we don't have them yet
for col in self.artist_columns:
if col not in artists_df:
artists_df[col] = np.NaN
if 'track.id' not in artists_df:
artists_df['track.id'] = np.NaN
# here, i fill in missing values
# with a second gather operation
if 'image' in artists_df:
artists_missing = artists_df[artists_df['image'].isnull()]
else:
artists_missing = artists_df
missing_ids = artists_missing['id'].tolist()
missing_ids = list(set(missing_ids))
if len(missing_ids) > 0:
artists_full_df = await self.get_full_artist_dataframes(missing_ids)
artists_df = pd.merge(artists_df, artists_full_df, how="outer")
artists_df = artists_df.drop_duplicates()
artists_df['smallImage'] = artists_df['image']
artists_df['bigImage'] = artists_df['image']
artists_df.drop('image', axis = 1)
artists_df_transform = {}
for column in self.artist_columns:
artists_df_transform[column] = 'max'
artists_df_transform['bigImage'] = 'first'
artists_df_transform['smallImage'] = 'last'
artists_df_transform['uri'] = 'first'
def agg_track_list(tracks): # set to remove duplicates
track_list = [x for x in list(set(tracks)) if str(x) != 'nan']
return track_list
artists_df_transform['track.id'] = agg_track_list
def agg_genres_list(genres):
genre_list = [x for x in list(set(genres)) if str(x) != 'nan']
return genre_list
artists_df_transform['genres'] = agg_genres_list
artists_df = artists_df.groupby(['id', 'name']).agg(artists_df_transform)
artists_df.rename(columns = {'track.id': 'tracks'}, inplace = True)
artists_df[self.artist_columns] = artists_df[self.artist_columns].fillna(value=False)
artists_df.reset_index(level=['id', 'name'], inplace = True)
# add artist's tracks_length
def get_tracks_len(row):
return len(list(row['tracks']))
artists_df['tracks_length'] = artists_df.apply(get_tracks_len, axis=1)
# add artist's genres_length
def get_genres_len(row):
return len(list(row['genres']))
artists_df['genres_length'] = artists_df.apply(get_genres_len, axis=1)
def get_ascii_artist_name(row):
return unidecode.unidecode(row['name'])
artists_df['name_ascii'] = artists_df.apply(get_ascii_artist_name, axis=1)
return artists_df
def get_tracks_master_df(self):
if self.tracks_dataframes == []:
return pd.DataFrame()
tracks_df = reduce(lambda left, right: pd.merge(left, right, how="outer"), self.tracks_dataframes)
tracks_df = tracks_df.drop_duplicates()
if 'id' not in tracks_df:
return pd.DataFrame()
tracks_df[self.track_columns] = tracks_df[self.track_columns].fillna(value=False)
tracks_df_transform = {}
tracks_df_transform['image_size'] = 'min'
tracks_df_transform['image_url'] = 'first'
#tracks_df_transform['top_tracks_short_term'] = 'first'
#tracks_df_transform['saved_tracks'] = 'first'
#tracks_df_transform['top_tracks_medium_term'] = 'first'
#tracks_df_transform['top_tracks_long_term'] = 'first'
#tracks_df_transform['playlist'] = 'first'
tracks_df = tracks_df.groupby(['id', 'name', 'uri']).agg(tracks_df_transform)
tracks_df.reset_index(level=['id', 'name', 'uri'], inplace = True)
return tracks_df
async def fetch_top_artists(self, time_range):
print('fetching top artists... ', time_range)
self.artist_columns.append("top_artists_" + time_range)
self.artist_columns.append("top_artists_" + time_range + "_ranking")
offsets = [0, 49]
top_artists = []
for offset in offsets:
URL = "https://api.spotify.com/v1/me/top/artists?limit=50&offset="+str(offset)+"&time_range="+time_range
resp_dict = await self.fetch_json_from_URL(URL = URL, name = "top artists({}):".format(time_range))
# so if user's dont listen to enough artists in the short term,
# then less than 100 short term artists are returned
# in which case ['items'] equals [] and so we must check for this
# and just simply do nothing when it happens
if resp_dict and resp_dict['total'] > 0 and len(resp_dict['items']) > 0:
artists_df = self.extract_full_artist_from_json(resp_dict['items'])
artists_df["top_artists_"+time_range] = True
top_artists.append(artists_df)
if len(top_artists) > 0:
artists_df = pd.concat(top_artists)
if 'id' in artists_df:
current_ranking = 0
rankings = []
seen_id = set()
for index, row in artists_df.iterrows():
if row['id'] not in seen_id:
current_ranking += 1
seen_id.add(row['id'])
rankings.append(current_ranking)
artists_df["top_artists_" + time_range + "_ranking"] = rankings
artists_df = artists_df[artists_df['id'].notnull()]
self.artists_dataframes.append(artists_df)
async def fetch_top_tracks(self, time_range):
print('fetching top tracks... ', time_range)
#self.track_columns.append("top_tracks_" + time_range)
offsets = [0, 49]
all_artists = []
all_tracks = []
for offset in offsets:
URL = "https://api.spotify.com/v1/me/top/tracks?limit=50&offset="+str(offset)+"&time_range="+time_range
resp_dict = await self.fetch_json_from_URL(URL = URL, name = "artists from top tracks({})".format(time_range))
if resp_dict and resp_dict['total'] > 0 and len(resp_dict['items']) > 0:
artists_df = json_normalize(data = resp_dict['items'], record_path=['artists'], meta=['id'], meta_prefix='track.')
artists_df = artists_df[['id', 'name', 'track.id']]
all_artists.append(artists_df)
tracks_df = json_normalize(data = resp_dict['items'], record_path=['album', 'images'], meta=['id', 'name', 'uri'], meta_prefix='track.')
tracks_df = self.cleanup_tracks_df(tracks_df)
tracks_df["top_tracks_"+time_range] = True
all_tracks.append(tracks_df)
if len(all_artists) > 0:
all_artists_df = pd.concat(all_artists)
if 'id' in all_artists_df:
all_artists_df = all_artists_df[all_artists_df['id'].notnull()]
self.artists_dataframes.append(all_artists_df)
if len(all_tracks) > 0:
all_tracks_df = pd.concat(all_tracks)
if 'id' in all_tracks_df:
all_tracks_df = all_tracks_df[all_tracks_df['id'].notnull()]
self.tracks_dataframes.append(all_tracks_df)
async def fetch_followed_artists(self):
print('fetching followed artists... ')
self.artist_columns.append("followed_artist")
next = "https://api.spotify.com/v1/me/following?type=artist&limit=50&offset=0"
followed_artists = []
while next:
resp_dict = await self.fetch_json_from_URL(URL = next, name = "followed artists")
if resp_dict and resp_dict['artists'] and resp_dict['artists']['total'] > 0 and len(resp_dict['artists']['items']) > 0:
next = resp_dict['artists']['next']
artists_df = self.extract_full_artist_from_json(resp_dict['artists']['items'])
artists_df['followed_artist'] = True
followed_artists.append(artists_df)
else:
break
if len(followed_artists) > 0:
followed_artists_df = pd.concat(followed_artists)
if 'id' in followed_artists_df:
followed_artists_df = followed_artists_df[followed_artists_df['id'].notnull()]
self.artists_dataframes.append(followed_artists_df)
async def fetch_saved_tracks(self):
print('fetching saved tracks... ')
#self.track_columns.append("saved_tracks")
next = "https://api.spotify.com/v1/me/tracks?limit=50&offset=0"
all_artists = []
all_tracks = []
while next:
resp_dict = await self.fetch_json_from_URL(URL = next, name = "saved tracks")
if resp_dict and resp_dict['total'] > 0 and len(resp_dict['items']) > 0:
next = resp_dict['next']
artists_df = json_normalize(data = resp_dict['items'], record_path=['track', 'artists'], meta=[['track', 'id']])
artists_df = artists_df[['id', 'name', 'track.id']]
all_artists.append(artists_df)
tracks_df = json_normalize(data = resp_dict['items'], record_path=['track', 'album', 'images'], meta=[['track', 'name'], ['track', 'id'], ['track', 'uri']])
tracks_df = self.cleanup_tracks_df(tracks_df)
tracks_df["saved_tracks"] = True
all_tracks.append(tracks_df)
else:
break
if len(all_artists) > 0:
all_artists_df = pd.concat(all_artists)
if 'id' in all_artists_df:
all_artists_df = all_artists_df[all_artists_df['id'].notnull()]
self.artists_dataframes.append(all_artists_df)
if len(all_tracks) > 0:
all_tracks_df = pd.concat(all_tracks)
if 'id' in all_tracks_df:
all_tracks_df = all_tracks_df[all_tracks_df['id'].notnull()]
self.tracks_dataframes.append(all_tracks_df)
async def fetch_playlists(self):
print('fetch_playlists...')
playlists_all = []
next = "https://api.spotify.com/v1/me/playlists?limit=50&offset=0"
while next:
resp_dict = await self.fetch_json_from_URL(URL = next, name = "playlists")
if resp_dict and resp_dict['total'] > 0 and len(resp_dict['items']) > 0:
next = resp_dict['next']
playlists_full = json_normalize(resp_dict['items'])
playlists = playlists_full[['id', 'owner.id']]
if self.USER_PLAYLISTS_ONLY:
playlists = playlists[playlists['owner.id'] == self.user_id]
playlists.drop('owner.id', axis=1, inplace=True)
playlists_all.append(playlists)
else:
break
if len(playlists_all) > 0:
return pd.concat(playlists_all)
return pd.DataFrame()
async def get_all_playlists(self):
playlists = await self.fetch_playlists()
self.artist_columns.append("playlist")
if playlists.empty or 'id' not in playlists:
return
tracks = []
artists = []
print('fetching', len(playlists), 'playlists...')
tasks = [self.fetch_playlist(playlistID) for playlistID in playlists['id']]
playlistDatas = await asyncio.gather(*tasks)
for playlistData in playlistDatas:
if not playlistData[0].empty:
artists.append(playlistData[0])
if not playlistData[1].empty:
tracks.append(playlistData[1])
if artists and len(artists) > 0:
self.artists_dataframes.append(pd.concat(artists))
if tracks and len(tracks) > 0:
self.tracks_dataframes.append(pd.concat(tracks))
async def fetch_playlist(self, ID):
next = "https://api.spotify.com/v1/playlists/"+ID+"/tracks?limit=100&offset=0"
all_artists = []
all_tracks = []
while next:
resp_dict = await self.fetch_json_from_URL(URL = next, name = "tracks from playlist")
if resp_dict and resp_dict['total'] > 0 and len(resp_dict['items']) > 0:
next = resp_dict['next']
artists_df = json_normalize(data = resp_dict['items'], record_path=['track', 'artists'], meta=[['track', 'id']])
artists_df = artists_df[['id', 'name', 'track.id']]
artists_df['playlist'] = True
all_artists.append(artists_df)
tracks_df = json_normalize(data = resp_dict['items'], record_path=['track', 'album', 'images'], meta=[['track', 'name'], ['track', 'id'], ['track', 'uri']])
tracks_df = self.cleanup_tracks_df(tracks_df)
tracks_df["playlist"] = True
all_tracks.append(tracks_df)
else:
break
all_artists_df = pd.DataFrame()
all_tracks_df = pd.DataFrame()
if len(all_artists) > 0:
all_artists_df = pd.concat(all_artists)
if 'id' in all_artists_df:
all_artists_df = all_artists_df[all_artists_df['id'].notnull()]
if len(all_tracks) > 0:
all_tracks_df = pd.concat(all_tracks)
if 'id' in all_tracks_df:
all_tracks_df = all_tracks_df[all_tracks_df['id'].notnull()]
return all_artists_df, all_tracks_df
''' takes a list of artist IDs, fetches the full artist objects from spotify using these IDs (50 at a time max),
calls extract_full_artist_from_json on the returns and returns a dataframe with all the columns needed
for the mobile app '''
async def get_full_artist_dataframes(self, all_IDs):
print(f"get_all_details_on({len(all_IDs)})_artists...")
ID_segments = self.split_into_N(all_IDs, 50)
tasks = [self.fetch_full_artists(IDs) for IDs in ID_segments]
artist_dataframes = await asyncio.gather(*tasks)
return pd.concat(artist_dataframes)
''' IDs should be of length 50 or less '''
async def fetch_full_artists(self, IDs):
URL = "https://api.spotify.com/v1/artists"
resp_dict = await self.fetch_json_from_URL(
URL = URL,
params = [('ids', ",".join(IDs))],
name = "full artist objects")
if resp_dict and resp_dict['artists']:
try:
artist_df = self.extract_full_artist_from_json(resp_dict['artists'])
except Exception as e:
with open('errorArtists.json', 'w') as outfile:
json.dump(resp_dict['artists'], outfile)
if artist_df.empty:
return pd.DataFrame()
if 'id' in artist_df:
artist_df = artist_df[artist_df['id'].notnull()]
return artist_df
return pd.DataFrame()
def split_into_N(self, _list, N):
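        # Chunk a list into sublists of at most N items, e.g.
        # split_into_N([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]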
return [_list[i * N:(i + 1) * N] for i in range((len(_list) + N - 1) // N )]
''' json_data must be a JSON array of full artist objects. Returns a dataframe of all the objects with
columns: id, name, genres, image, image_size'''
def extract_full_artist_from_json(self, json_data):
json_data_no_none = []
for val in json_data:
if val != None:
json_data_no_none.append(val)
        artists_genres = json_normalize(data=json_data_no_none, record_path='genres', meta=['id', 'name', 'uri'])
from pandas.core.frame import DataFrame
from airflow.models.baseoperator import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.models import Variable
import boto3
import json
import pandas as pd
class FinancialProcessorOperator(BaseOperator):
ui_color = '#358140'
@apply_defaults
def __init__(self, stock_method:str, *args, **kwargs):
super(FinancialProcessorOperator, self).__init__(*args, **kwargs)
self.__AWS_S3_BUCKET_BRONZE = Variable.get("S3_BUCKET_BRONZE")
self.__AWS_S3_BUCKET_SILVER = Variable.get("S3_BUCKET_SILVER")
self.__AWS_ACCESS_KEY_ID = Variable.get("USER_ACCESS_KEY_ID")
self.__AWS_SECRET_ACCESS_KEY = Variable.get("USER_SECRET_ACCESS_KEY")
self.__STOCK_METHOD = stock_method.lower()
@staticmethod
def generate_file_path(stock_method: str, company: str, file_name: str, file_format: str):
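        # Illustrative example: generate_file_path('overview', 'AAPL', 'overview', 'parquet')
        # -> "stock_method=overview/company=AAPL/overview.parquet"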
return f"stock_method={stock_method}/company={company}/{file_name}.{file_format}"
def client_connection(self):
client = boto3.client(
"s3",
aws_access_key_id=self.__AWS_ACCESS_KEY_ID,
aws_secret_access_key=self.__AWS_SECRET_ACCESS_KEY,
)
return client
def get_object(self, bucket: str, key: str):
client = self.client_connection()
response = client.get_object(Bucket=bucket,
Key=key)
if self.__STOCK_METHOD == "time_series_daily_adjusted":
data = response.get("Body")
else:
data = json.loads(response.get("Body").read().decode('utf-8'))
return data
def read_from_bronze(self, company):
data = self.get_object(bucket=self.__AWS_S3_BUCKET_BRONZE,
key=self.generate_file_path(stock_method=self.__STOCK_METHOD,
company=company['Symbol'],
file_name=self.__STOCK_METHOD,
file_format="json"))
return data
def save_to_silver(self, company:str, df: DataFrame):
df.to_parquet(
"s3://{}/{}".format(self.__AWS_S3_BUCKET_SILVER,
self.generate_file_path(stock_method=self.__STOCK_METHOD,
company=company['Symbol'],
file_name=self.__STOCK_METHOD,
file_format="parquet")),
storage_options={
"key": self.__AWS_ACCESS_KEY_ID,
"secret": self.__AWS_SECRET_ACCESS_KEY
},
index=False,
engine="fastparquet"
)
return '{} was processed'.format(company['Symbol'])
def process_earnings(self):
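        # For every S&P 500 company, pull the raw earnings JSON from the bronze
        # bucket, flatten its quarterly earnings into a DataFrame tagged with the
        # company symbol, and persist it as parquet to the silver bucket.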
companies = self.get_object(bucket=self.__AWS_S3_BUCKET_BRONZE,
key="companies/s&p-500-companies.json")
for company in companies:
s3_data = self.read_from_bronze(company=company)
try:
symbol = s3_data['symbol']
quarterlyEarnings = s3_data['quarterlyEarnings']
df = pd.DataFrame(quarterlyEarnings)
df['company'] = symbol
self.log.info(self.save_to_silver(company, df))
except Exception as exception:
self.log.info(exception)
def process_overview(self):
companies = self.get_object(bucket=self.__AWS_S3_BUCKET_BRONZE,
key="companies/s&p-500-companies.json")
for company in companies:
s3_data = self.read_from_bronze(company=company)
try:
df = pd.DataFrame(s3_data, index=['Symbol'])
if "Name" in df.columns:
self.log.info(self.save_to_silver(company, df))
except Exception as exception:
self.log.info(exception)
def process_time_series(self):
companies = self.get_object(bucket=self.__AWS_S3_BUCKET_BRONZE,
key="companies/s&p-500-companies.json")
companies = json.loads(companies.read().decode('utf-8'))
for company in companies:
s3_data = self.read_from_bronze(company=company)
try:
                df = pd.read_json(s3_data, orient='records')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pandas as pd
from astropy.coordinates import SkyCoord
from astropy.io.votable import parse
from sh import bzip2
from ...lib.context_managers import cd
# =============================================================================
# CONSTANTS
# =============================================================================
PATH = os.path.abspath(os.path.dirname(__file__))
CATALOG_PATH = os.path.join(PATH, "carpyncho_catalog.pkl")
# =============================================================================
# BUILD
# =============================================================================
def get_ogle_3_resume():
with cd(PATH):
bzip2("-f", "-dk", "ogleIII_all.csv.bz2")
df = | pd.read_csv("ogleIII_all.csv") | pandas.read_csv |
"""Make predictions based on class probabilities and thresholds"""
from pathlib import Path
import pandas as pd
def prediction_dataframe(probabilities, thresholds=0.0):
if isinstance(probabilities, list):
# Need to join multiple csv-files as one df
df_list = []
for csv in probabilities:
df = pd.read_csv(csv)
# Create multi-index from sample name and roi number
df.insert(0, "sample", Path(csv).with_suffix("").stem)
df.set_index(["sample", "roi"], inplace=True)
df_list.append(df)
df = pd.concat(df_list)
elif isinstance(probabilities, (str, Path)):
        df = pd.read_csv(probabilities, index_col=0)
import pandas as pd
import numpy as np
from texthero import nlp
from . import PandasTestCase
import doctest
import unittest
import string
"""
Test doctest
"""
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(nlp))
return tests
class TestNLP(PandasTestCase):
"""
Named entity.
"""
def test_named_entities(self):
s = pd.Series("New York is a big city")
s_true = pd.Series([[("New York", "GPE", 0, 8)]])
self.assertEqual(nlp.named_entities(s), s_true)
"""
Noun chunks.
"""
def test_noun_chunks(self):
s = pd.Series("Today is such a beautiful day")
s_true = pd.Series(
[[("Today", "NP", 0, 5), ("such a beautiful day", "NP", 9, 29)]]
)
self.assertEqual(nlp.noun_chunks(s), s_true)
"""
Count sentences.
"""
def test_count_sentences(self):
s = pd.Series("I think ... it counts correctly. Doesn't it? Great!")
s_true = pd.Series(3)
self.assertEqual(nlp.count_sentences(s), s_true)
def test_count_sentences_numeric(self):
s = pd.Series([13.0, 42.0])
self.assertRaises(TypeError, nlp.count_sentences, s)
def test_count_sentences_missing_value(self):
s = pd.Series(["Test.", np.nan])
self.assertRaises(TypeError, nlp.count_sentences, s)
def test_count_sentences_index(self):
s = pd.Series(["Test"], index=[5])
counted_sentences_s = nlp.count_sentences(s)
t_same_index = pd.Series([""], index=[5])
self.assertTrue(counted_sentences_s.index.equals(t_same_index.index))
def test_count_sentences_wrong_index(self):
s = pd.Series(["Test", "Test"], index=[5, 6])
counted_sentences_s = nlp.count_sentences(s)
t_different_index = pd.Series(["", ""], index=[5, 7])
self.assertFalse(counted_sentences_s.index.equals(t_different_index.index))
"""
POS tagging.
"""
def test_pos(self):
s = | pd.Series(["Today is such a beautiful day", "São Paulo is a great city"]) | pandas.Series |
"""
Tests for statistical pipeline terms.
"""
from numpy import (
arange,
full,
full_like,
nan,
where,
)
from pandas import (
DataFrame,
date_range,
Int64Index,
Timestamp,
)
from pandas.util.testing import assert_frame_equal
from scipy.stats import linregress, pearsonr, spearmanr
from catalyst.assets import Equity
from catalyst.errors import IncompatibleTerms, NonExistentAssetInTimeFrame
from catalyst.pipeline import CustomFactor, Pipeline
from catalyst.pipeline.data import USEquityPricing
from catalyst.pipeline.data.testing import TestingDataSet
from catalyst.pipeline.engine import SimplePipelineEngine
from catalyst.pipeline.factors.equity import (
Returns,
RollingLinearRegressionOfReturns,
RollingPearsonOfReturns,
RollingSpearmanOfReturns,
)
from catalyst.pipeline.loaders.frame import DataFrameLoader
from catalyst.pipeline.sentinels import NotSpecified
from catalyst.testing import (
AssetID,
AssetIDPlusDay,
check_arrays,
make_alternating_boolean_array,
make_cascading_boolean_array,
parameter_space,
)
from catalyst.testing.fixtures import (
WithSeededRandomPipelineEngine,
WithTradingEnvironment,
CatalystTestCase,
)
from catalyst.utils.numpy_utils import (
bool_dtype,
datetime64ns_dtype,
float64_dtype,
)
class StatisticalBuiltInsTestCase(WithTradingEnvironment, CatalystTestCase):
sids = ASSET_FINDER_EQUITY_SIDS = Int64Index([1, 2, 3])
START_DATE = Timestamp('2015-01-31', tz='UTC')
END_DATE = Timestamp('2015-03-01', tz='UTC')
@classmethod
def init_class_fixtures(cls):
super(StatisticalBuiltInsTestCase, cls).init_class_fixtures()
day = cls.trading_calendar.day
cls.dates = dates = date_range(
'2015-02-01', '2015-02-28', freq=day, tz='UTC',
)
        # Using these start and end dates because they are a contiguous span of
# 5 days (Monday - Friday) and they allow for plenty of days to look
# back on when computing correlations and regressions.
cls.start_date_index = start_date_index = 14
cls.end_date_index = end_date_index = 18
cls.pipeline_start_date = dates[start_date_index]
cls.pipeline_end_date = dates[end_date_index]
cls.num_days = num_days = end_date_index - start_date_index + 1
sids = cls.sids
cls.assets = assets = cls.asset_finder.retrieve_all(sids)
cls.my_asset_column = my_asset_column = 0
cls.my_asset = assets[my_asset_column]
cls.num_assets = num_assets = len(assets)
cls.raw_data = raw_data = DataFrame(
data=arange(len(dates) * len(sids), dtype=float64_dtype).reshape(
len(dates), len(sids),
),
index=dates,
columns=assets,
)
# Using mock 'close' data here because the correlation and regression
# built-ins use USEquityPricing.close as the input to their `Returns`
# factors. Since there is no way to change that when constructing an
# instance of these built-ins, we need to test with mock 'close' data
# to most accurately reflect their true behavior and results.
close_loader = DataFrameLoader(USEquityPricing.close, raw_data)
cls.run_pipeline = SimplePipelineEngine(
{USEquityPricing.close: close_loader}.__getitem__,
dates,
cls.asset_finder,
).run_pipeline
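        # The dict's ``__getitem__`` serves as the engine's ``get_loader``
        # callback, so any column backed by USEquityPricing.close resolves to
        # the mock DataFrameLoader defined above.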
cls.cascading_mask = \
AssetIDPlusDay() < (sids[-1] + dates[start_date_index].day)
cls.expected_cascading_mask_result = make_cascading_boolean_array(
shape=(num_days, num_assets),
)
cls.alternating_mask = (AssetIDPlusDay() % 2).eq(0)
cls.expected_alternating_mask_result = make_alternating_boolean_array(
shape=(num_days, num_assets),
)
cls.expected_no_mask_result = full(
shape=(num_days, num_assets), fill_value=True, dtype=bool_dtype,
)
@parameter_space(returns_length=[2, 3], correlation_length=[3, 4])
def _test_correlation_factors(self, returns_length, correlation_length):
"""
Tests for the built-in factors `RollingPearsonOfReturns` and
`RollingSpearmanOfReturns`.
"""
assets = self.assets
my_asset = self.my_asset
my_asset_column = self.my_asset_column
dates = self.dates
start_date = self.pipeline_start_date
end_date = self.pipeline_end_date
start_date_index = self.start_date_index
end_date_index = self.end_date_index
num_days = self.num_days
run_pipeline = self.run_pipeline
returns = Returns(window_length=returns_length)
masks = (self.cascading_mask, self.alternating_mask, NotSpecified)
expected_mask_results = (
self.expected_cascading_mask_result,
self.expected_alternating_mask_result,
self.expected_no_mask_result,
)
for mask, expected_mask in zip(masks, expected_mask_results):
pearson_factor = RollingPearsonOfReturns(
target=my_asset,
returns_length=returns_length,
correlation_length=correlation_length,
mask=mask,
)
spearman_factor = RollingSpearmanOfReturns(
target=my_asset,
returns_length=returns_length,
correlation_length=correlation_length,
mask=mask,
)
columns = {
'pearson_factor': pearson_factor,
'spearman_factor': spearman_factor,
}
pipeline = Pipeline(columns=columns)
if mask is not NotSpecified:
pipeline.add(mask, 'mask')
results = run_pipeline(pipeline, start_date, end_date)
pearson_results = results['pearson_factor'].unstack()
spearman_results = results['spearman_factor'].unstack()
if mask is not NotSpecified:
mask_results = results['mask'].unstack()
check_arrays(mask_results.values, expected_mask)
# Run a separate pipeline that calculates returns starting
# (correlation_length - 1) days prior to our start date. This is
# because we need (correlation_length - 1) extra days of returns to
# compute our expected correlations.
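            # For example, with correlation_length = 3 the day-t correlation
            # uses returns from days t-2, t-1 and t, i.e. two extra rows
            # before the pipeline start date.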
results = run_pipeline(
Pipeline(columns={'returns': returns}),
dates[start_date_index - (correlation_length - 1)],
dates[end_date_index],
)
returns_results = results['returns'].unstack()
# On each day, calculate the expected correlation coefficients
# between the asset we are interested in and each other asset. Each
# correlation is calculated over `correlation_length` days.
expected_pearson_results = full_like(pearson_results, nan)
expected_spearman_results = full_like(spearman_results, nan)
for day in range(num_days):
todays_returns = returns_results.iloc[
day:day + correlation_length
]
my_asset_returns = todays_returns.iloc[:, my_asset_column]
for asset, other_asset_returns in todays_returns.iteritems():
asset_column = int(asset) - 1
expected_pearson_results[day, asset_column] = pearsonr(
my_asset_returns, other_asset_returns,
)[0]
expected_spearman_results[day, asset_column] = spearmanr(
my_asset_returns, other_asset_returns,
)[0]
expected_pearson_results = DataFrame(
data=where(expected_mask, expected_pearson_results, nan),
index=dates[start_date_index:end_date_index + 1],
columns=assets,
)
assert_frame_equal(pearson_results, expected_pearson_results)
expected_spearman_results = DataFrame(
data=where(expected_mask, expected_spearman_results, nan),
index=dates[start_date_index:end_date_index + 1],
columns=assets,
)
assert_frame_equal(spearman_results, expected_spearman_results)
@parameter_space(returns_length=[2, 3], regression_length=[3, 4])
def _test_regression_of_returns_factor(self,
returns_length,
regression_length):
"""
Tests for the built-in factor `RollingLinearRegressionOfReturns`.
"""
assets = self.assets
my_asset = self.my_asset
my_asset_column = self.my_asset_column
dates = self.dates
start_date = self.pipeline_start_date
end_date = self.pipeline_end_date
start_date_index = self.start_date_index
end_date_index = self.end_date_index
num_days = self.num_days
run_pipeline = self.run_pipeline
# The order of these is meant to align with the output of `linregress`.
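        # scipy.stats.linregress returns (slope, intercept, rvalue, pvalue,
        # stderr), so 'beta' maps to the slope and 'alpha' to the intercept.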
outputs = ['beta', 'alpha', 'r_value', 'p_value', 'stderr']
returns = Returns(window_length=returns_length)
masks = self.cascading_mask, self.alternating_mask, NotSpecified
expected_mask_results = (
self.expected_cascading_mask_result,
self.expected_alternating_mask_result,
self.expected_no_mask_result,
)
for mask, expected_mask in zip(masks, expected_mask_results):
regression_factor = RollingLinearRegressionOfReturns(
target=my_asset,
returns_length=returns_length,
regression_length=regression_length,
mask=mask,
)
columns = {
output: getattr(regression_factor, output)
for output in outputs
}
pipeline = Pipeline(columns=columns)
if mask is not NotSpecified:
pipeline.add(mask, 'mask')
results = run_pipeline(pipeline, start_date, end_date)
if mask is not NotSpecified:
mask_results = results['mask'].unstack()
check_arrays(mask_results.values, expected_mask)
output_results = {}
expected_output_results = {}
for output in outputs:
output_results[output] = results[output].unstack()
expected_output_results[output] = full_like(
output_results[output], nan,
)
# Run a separate pipeline that calculates returns starting
# (regression_length - 1) days prior to our start date. This is
# because we need (regression_length - 1) extra days of returns to
# compute our expected regressions.
results = run_pipeline(
Pipeline(columns={'returns': returns}),
dates[start_date_index - (regression_length - 1)],
dates[end_date_index],
)
returns_results = results['returns'].unstack()
# On each day, calculate the expected regression results for Y ~ X
# where Y is the asset we are interested in and X is each other
# asset. Each regression is calculated over `regression_length`
# days of data.
for day in range(num_days):
todays_returns = returns_results.iloc[
day:day + regression_length
]
my_asset_returns = todays_returns.iloc[:, my_asset_column]
for asset, other_asset_returns in todays_returns.iteritems():
asset_column = int(asset) - 1
expected_regression_results = linregress(
y=other_asset_returns, x=my_asset_returns,
)
for i, output in enumerate(outputs):
expected_output_results[output][day, asset_column] = \
expected_regression_results[i]
for output in outputs:
output_result = output_results[output]
expected_output_result = DataFrame(
where(expected_mask, expected_output_results[output], nan),
index=dates[start_date_index:end_date_index + 1],
columns=assets,
)
assert_frame_equal(output_result, expected_output_result)
def _test_correlation_and_regression_with_bad_asset(self):
"""
Test that `RollingPearsonOfReturns`, `RollingSpearmanOfReturns` and
`RollingLinearRegressionOfReturns` raise the proper exception when
given a nonexistent target asset.
"""
my_asset = Equity(0, exchange="TEST")
start_date = self.pipeline_start_date
end_date = self.pipeline_end_date
run_pipeline = self.run_pipeline
# This filter is arbitrary; the important thing is that we test each
# factor both with and without a specified mask.
my_asset_filter = AssetID().eq(1)
for mask in (NotSpecified, my_asset_filter):
pearson_factor = RollingPearsonOfReturns(
target=my_asset,
returns_length=3,
correlation_length=3,
mask=mask,
)
spearman_factor = RollingSpearmanOfReturns(
target=my_asset,
returns_length=3,
correlation_length=3,
mask=mask,
)
regression_factor = RollingLinearRegressionOfReturns(
target=my_asset,
returns_length=3,
regression_length=3,
mask=mask,
)
with self.assertRaises(NonExistentAssetInTimeFrame):
run_pipeline(
Pipeline(columns={'pearson_factor': pearson_factor}),
start_date,
end_date,
)
with self.assertRaises(NonExistentAssetInTimeFrame):
run_pipeline(
Pipeline(columns={'spearman_factor': spearman_factor}),
start_date,
end_date,
)
with self.assertRaises(NonExistentAssetInTimeFrame):
run_pipeline(
Pipeline(columns={'regression_factor': regression_factor}),
start_date,
end_date,
)
def test_require_length_greater_than_one(self):
my_asset = Equity(0, exchange="TEST")
with self.assertRaises(ValueError):
RollingPearsonOfReturns(
target=my_asset,
returns_length=3,
correlation_length=1,
)
with self.assertRaises(ValueError):
RollingSpearmanOfReturns(
target=my_asset,
returns_length=3,
correlation_length=1,
)
with self.assertRaises(ValueError):
RollingLinearRegressionOfReturns(
target=my_asset,
returns_length=3,
regression_length=1,
)
class StatisticalMethodsTestCase(WithSeededRandomPipelineEngine,
CatalystTestCase):
sids = ASSET_FINDER_EQUITY_SIDS = Int64Index([1, 2, 3])
START_DATE = | Timestamp('2015-01-31', tz='UTC') | pandas.Timestamp |